/* YAP 7.1.0 -- locks_x86.h */
/************************************************************************
**                                                                     **
**                 The YapTab/YapOr/OPTYap systems                     **
**                                                                     **
** YapTab extends the Yap Prolog engine to support sequential tabling  **
** YapOr extends the Yap Prolog engine to support or-parallelism       **
** OPTYap extends the Yap Prolog engine to support or-parallel tabling **
**                                                                     **
**                                                                     **
**      YAP Prolog was developed at University of Porto, Portugal      **
**                                                                     **
************************************************************************/

/************************************************************************
**                      Atomic locks for X86                           **
************************************************************************/
/* Test-and-set spinlock word for x86/x86-64.
 * 0 = free, non-zero = held.  (The closing "} spinlock_t;" had been
 * lost from this chunk, leaving the typedef unterminated.) */
typedef struct {
  volatile unsigned int lock;
} spinlock_t;

/* Try to acquire the lock without blocking.
 * Atomically exchanges 1 into the low byte of the lock word; the value
 * read back is the previous state, so 0 means we just took a free lock.
 * Returns 1 on success, 0 if the lock was already held.
 * The "memory" clobber makes this an acquire-side compiler barrier. */
static inline int
spin_trylock(spinlock_t *lock)
{
  char tmp = 1;
  __asm__ __volatile__(
      "xchgb %b0, %1"
      : "=q"(tmp), "=m"(lock->lock)
      : "0"(tmp) : "memory");
  return tmp == 0;
}
32
33static inline void
34spin_unlock(spinlock_t *lock)
35{
36 /* To unlock we move 0 to the lock.
37 * On i386 this needs to be a locked operation
38 * to avoid Pentium Pro errata 66 and 92.
39 */
40#if defined(__x86_64__)
41 __asm__ __volatile__("" : : : "memory");
42 *(unsigned char*)&lock->lock = 0;
43#else
44 char tmp = 0;
45 __asm__ __volatile__(
46 "xchgb %b0, %1"
47 : "=q"(tmp), "=m"(lock->lock)
48 : "0"(tmp) : "memory");
49#endif
50}
51
52
#define INIT_LOCK(LOCK_VAR)    ((LOCK_VAR) = 0)
/* NOTE: TRY_LOCK takes a POINTER to the lock word; LOCK/UNLOCK below
 * take the lock variable itself and pass its address down. */
#define TRY_LOCK(LOCK_VAR)     spin_trylock((spinlock_t *)(LOCK_VAR))

//#define DEBUG_LOCKS 1
#if DEBUG_LOCKS

extern int debug_locks;
/* Spin until the lock is acquired, tracing each attempt to stderr. */
#define LOCK(LOCK_VAR) do {                                           \
    if (debug_locks) fprintf(stderr,"[%d] %s:%d: LOCK(%p)\n",         \
                             (int)pthread_self(),                     \
                             __BASE_FILE__, __LINE__,&(LOCK_VAR));    \
    if (TRY_LOCK(&(LOCK_VAR))) break;                                 \
    while (IS_LOCKED(LOCK_VAR)) continue;                             \
  } while (1)
#define IS_LOCKED(LOCK_VAR)    ((LOCK_VAR) != 0)
#define IS_UNLOCKED(LOCK_VAR)  ((LOCK_VAR) == 0)
/* FIX: wrapped in do/while(0) -- the previous two-statement expansion
 * meant `if (c) UNLOCK(v);` ran spin_unlock() unconditionally. */
#define UNLOCK(LOCK_VAR) do {                                         \
    if (debug_locks) fprintf(stderr,"[%d] %s:%d: UNLOCK(%p)\n",       \
                             (int)pthread_self(),                     \
                             __BASE_FILE__, __LINE__,&(LOCK_VAR));    \
    spin_unlock((spinlock_t *)&(LOCK_VAR));                           \
  } while (0)
#else
/* Spin (busy-wait on a plain read) until the lock is acquired.
 * FIX: dropped the extra outer { } so `LOCK(v);` is a single statement
 * and composes with if/else. */
#define LOCK(LOCK_VAR) do {                                           \
    if (TRY_LOCK(&(LOCK_VAR))) break;                                 \
    while (IS_LOCKED(LOCK_VAR)) continue;                             \
  } while (1)
#define IS_LOCKED(LOCK_VAR)    ((LOCK_VAR) != 0)
#define IS_UNLOCKED(LOCK_VAR)  ((LOCK_VAR) == 0)
#define UNLOCK(LOCK_VAR)       spin_unlock((spinlock_t *)&(LOCK_VAR))
#endif
82
/* the code that follows has been adapted from the Erlang sources */
84
/* Reader/writer spinlock (adapted from Erlang): each reader adds 1 to
 * the counter, a writer subtracts RWLOCK_OFFSET, so the counter's sign
 * and magnitude encode the current holders. */
typedef struct {
  volatile int lock;
} rwlock_t;

#define RWLOCK_OFFSET (1<<24)

/* Put the lock into the fully unlocked state (counter == 0). */
static inline void
init_rwlock(rwlock_t *lock)
{
  (*lock).lock = 0;
}
96
/* Drop one read hold: atomically decrement the reader count. */
static inline void
read_unlock(rwlock_t *lock)
{
  __asm__ __volatile__(
      "lock; decl %0"
      : "=m"(lock->lock)
      : "m"(lock->lock)
      );
}
106
107static inline int
108read_trylock(rwlock_t *lock)
109{
110 int tmp;
111
112 tmp = 1;
113 __asm__ __volatile__(
114 "lock; xaddl %0, %1"
115 : "=r"(tmp)
116 : "m"(lock->lock), "0"(tmp));
117 /* tmp is now the lock's previous value */
118 if (__builtin_expect(tmp >= 0, 1))
119 return 1;
120 read_unlock(lock);
121 return 0;
122}
123
124static inline int
125read_is_locked(rwlock_t *lock)
126{
127 return lock->lock < 0;
128}
129
130static inline void
131read_lock(rwlock_t *lock)
132{
133 for(;;) {
134 if (__builtin_expect(read_trylock(lock) != 0, 1))
135 break;
136 do {
137 __asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
138 } while (read_is_locked(lock));
139 }
140}
141
/* Release the write hold: atomically add RWLOCK_OFFSET back,
 * returning the counter to 0 (plus any readers already bidding). */
static inline void
write_unlock(rwlock_t *lock)
{
  __asm__ __volatile__(
      "lock; addl %2,%0"
      : "=m"(lock->lock)
      : "m"(lock->lock), "i"(RWLOCK_OFFSET));
}
150
151static inline int
152write_trylock(rwlock_t *lock)
153{
154 int tmp;
155
156 tmp = -RWLOCK_OFFSET;
157 __asm__ __volatile__(
158 "lock; xaddl %0, %1"
159 : "=r"(tmp)
160 : "m"(lock->lock), "0"(tmp));
161 /* tmp is now the lock's previous value */
162 if (__builtin_expect(tmp == 0, 1))
163 return 1;
164 write_unlock(lock);
165 return 0;
166}
167
168static inline int
169write_is_locked(rwlock_t *lock)
170{
171 return lock->lock != 0;
172}
173
174static inline void
175write_lock(rwlock_t *lock)
176{
177 for(;;) {
178 if (__builtin_expect(write_trylock(lock) != 0, 1))
179 break;
180 do {
181 __asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
182 } while (write_is_locked(lock));
183 }
184}
185
/* Convenience wrappers: each takes the rwlock variable itself and
 * passes its address to the corresponding inline function. */
#define INIT_RWLOCK(lock)  init_rwlock(&(lock))
#define READ_LOCK(lock)    read_lock(&(lock))
#define READ_UNLOCK(lock)  read_unlock(&(lock))
#define WRITE_LOCK(lock)   write_lock(&(lock))
#define WRITE_UNLOCK(lock) write_unlock(&(lock))
191
192
#if THREADS

/* pthread mutex wrappers (LOCK_VAR is a pthread_mutex_t *) */

#if DEBUG_LOCKS

/* Trace, then lock.  The comma expression keeps this usable anywhere a
 * plain pthread_mutex_lock() call expression would be. */
#define MUTEX_LOCK(LOCK_VAR) ((debug_locks ? fprintf(stderr,"[%d] %s:%d: MULOCK(%p)\n", (int)pthread_self(), \
                                __BASE_FILE__, __LINE__,(LOCK_VAR)) : 1), \
                              pthread_mutex_lock((LOCK_VAR)) )
#define MUTEX_TRYLOCK(LOCK_VAR)  pthread_mutex_trylock(LOCK_VAR)
/* FIX: this macro previously began with a stray `if`, producing an
 * empty-bodied `if ((...), pthread_mutex_unlock(...))` -- a dangling-
 * else hazard that also discarded the unlock as a mere condition.
 * Now a comma expression, parallel to MUTEX_LOCK. */
#define MUTEX_UNLOCK(LOCK_VAR) ((debug_locks ? fprintf(stderr,"[%d] %s:%d: MUNLOCK(%p)\n", (int)pthread_self(), \
                                  __BASE_FILE__, __LINE__,(LOCK_VAR)) : 1), \
                                pthread_mutex_unlock((LOCK_VAR)) )
#else
#define MUTEX_LOCK(LOCK_VAR)     pthread_mutex_lock(LOCK_VAR)
#define MUTEX_TRYLOCK(LOCK_VAR)  pthread_mutex_trylock(LOCK_VAR)
#define MUTEX_UNLOCK(LOCK_VAR)   pthread_mutex_unlock(LOCK_VAR)
#endif

#else

/* No thread support: mutex operations compile away to nothing. */
#define MUTEX_LOCK(LOCK_VAR)
#define MUTEX_TRYLOCK(LOCK_VAR)
#define MUTEX_UNLOCK(LOCK_VAR)

#endif
219#endif