x86, ticketlock: Make __ticket_spin_trylock common
[pandora-kernel.git] arch/x86/include/asm/spinlock.h
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
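
/*
 * For reference, a sketch of those definitions (from this tree's
 * asm/spinlock_types.h, shown for the NR_CPUS < 256 case; the exact
 * spelling may differ):
 *
 *	typedef u8  __ticket_t;		// u16 when NR_CPUS >= 256
 *	typedef u16 __ticketpair_t;	// u32 when NR_CPUS >= 256
 *
 *	#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 *
 *	typedef struct arch_spinlock {
 *		union {
 *			__ticketpair_t head_tail;
 *			struct __raw_tickets {
 *				__ticket_t head, tail;	// head in the low part
 *			} tickets;
 *		};
 *	} arch_spinlock_t;
 */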

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
        (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = 1 };

        inc = xadd(&lock->tickets, inc);

        for (;;) {
                if (inc.head == inc.tail)
                        break;
                cpu_relax();
                inc.head = ACCESS_ONCE(lock->tickets.head);
        }
        barrier();              /* make sure nothing creeps in before the lock is taken */
}
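
/*
 * A worked example of the fast path above (illustration only): with the
 * lock initially { head = 0, tail = 0 }, the first locker's xadd returns
 * the old value { .head = 0, .tail = 0 } and leaves the lock at { 0, 1 };
 * head == tail in the snapshot, so it proceeds at once.  A second locker's
 * xadd returns { .head = 0, .tail = 1 }, so it spins until the first
 * unlock advances head to 1, its ticket number.
 */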

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.tickets = ACCESS_ONCE(lock->tickets);
        if (old.tickets.head != old.tickets.tail)
                return 0;

        new.head_tail = old.head_tail + (1 << TICKET_SHIFT);    /* bump only the tail, which lives in the high part */

        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
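
/*
 * Sketch of the arithmetic above, assuming the TICKET_SHIFT == 8 case:
 * an unlocked lock with head == tail == 3 has head_tail == 0x0303, and
 * adding 1 << 8 gives 0x0403, i.e. tail = 4, head = 3 -- ticket 3 is
 * claimed exactly as the locked xadd in __ticket_spin_lock would do.
 */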

#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
                     : "+m" (lock->head_tail)
                     :
                     : "memory", "cc");
}
#else
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
                     : "+m" (lock->head_tail)
                     :
                     : "memory", "cc");
}
#endif
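
/*
 * Note that incb/incw touch only the low (head) part of head_tail, so an
 * unlock hands the lock to the next ticket without disturbing the tail.
 * The byte-sized variant matches the 8-bit tickets used when NR_CPUS is
 * below 256, which is where the 256-CPU limit in the header comment
 * comes from.
 */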

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

        return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

        return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}
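
/*
 * In other words: tail != head means somebody holds the lock, and a
 * tail/head distance greater than one means at least one further CPU is
 * queued behind the holder, e.g. { head = 3, tail = 5 } is a held lock
 * with one waiter.
 */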

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
{
        arch_spin_lock(lock);
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
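
/*
 * A worked example of that counter scheme (illustration only; the
 * constants live in asm/rwlock.h): the counter starts at RW_LOCK_BIAS.
 * Each reader atomically decrements it by one, so it stays positive as
 * long as only readers are in.  A writer subtracts the whole bias and
 * may proceed only if the result is exactly zero, i.e. no readers and
 * no other writer; any other result leaves the counter negative or
 * short of the bias, and the contender must back out and wait.
 */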

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
        return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
        return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        /* Atomically take a reader slot; a nonnegative result is the fast path. */
        asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
                     "jns 1f\n"
                     "call __read_lock_failed\n\t"
                     "1:\n"
                     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        /* Atomically subtract the whole bias; zero means no readers or writers. */
        asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
                     "jz 1f\n"
                     "call __write_lock_failed\n\t"
                     "1:\n"
                     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
                     : "memory");
}
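
/*
 * A rough C rendering of the two fast paths above (a sketch only -- the
 * real code must do the update and the flag test as one LOCK-prefixed
 * instruction):
 *
 *	read:  if (--rw->lock >= 0) return;
 *	       else __read_lock_failed(rw);
 *	write: if ((rw->write -= RW_LOCK_BIAS) == 0) return;
 *	       else __write_lock_failed(rw);
 */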

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

        if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
                return 1;
        READ_LOCK_ATOMIC(inc)(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)&lock->write;

        if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
                return 1;
        atomic_add(WRITE_LOCK_CMP, count);
        return 0;
}
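
/*
 * Both trylocks use the same optimistic pattern as the lock fast paths:
 * charge the count as if the lock were free, and undo the charge if the
 * result shows it was not.  The count can therefore dip transiently while
 * a trylock backs out, which is one reason the can_lock helpers above are
 * hints rather than guarantees.
 */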

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
                     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
                     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */