#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
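
/*
 * Illustrative sketch only (not part of this header): each ALT_SMP() site
 * records, in the .alt.smp.init section, the address of the SMP instruction
 * followed by its UP replacement.  When the kernel finds itself running on a
 * uniprocessor, early boot code walks that table and rewrites each site.
 * The real fixup is done in assembly; the entry layout and the
 * __smpalt_begin/__smpalt_end symbol names below are assumptions made for
 * the sketch.
 */
#if 0
struct smp_alt_entry {
	u32	*insn;		/* address recorded by ".long 9998b"	*/
	u32	up_insn;	/* UP replacement instruction		*/
};

extern struct smp_alt_entry __smpalt_begin[], __smpalt_end[];

static void __init fixup_smp_on_up(void)
{
	struct smp_alt_entry *e;

	for (e = __smpalt_begin; e < __smpalt_end; e++)
		*e->insn = e->up_insn;	/* caches must then be flushed for this range */
}
#endif
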
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

/* Drain prior writes with a DSB, then SEV to wake any CPUs waiting in WFE. */
static inline void dsb_sev(void)

/*
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 */
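
/*
 * Illustrative sketch only: the ldrex/strex loops below behave roughly like
 * the following C, written with a GCC __atomic builtin instead of the real
 * exclusive-monitor instructions.  The hypothetical example_spin_lock() is
 * not part of this header; it just makes the read-check-conditional-store
 * scheme described above explicit.
 */
#if 0
static inline void example_spin_lock(arch_spinlock_t *lock)
{
	unsigned int old;

	do {
		old = 0;	/* we may only take the lock if it currently reads 0 */
	} while (!__atomic_compare_exchange_n(&lock->lock, &old, 1, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));

	smp_mb();		/* barrier after acquiring: memory is weakly ordered */
}
#endif
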
#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
"	strexeq	%0, %2, [%1]\n"
	: "r" (&lock->lock), "r" (1)

static inline int arch_spin_trylock(arch_spinlock_t *lock)
"	strexeq	%0, %2, [%1]"
	: "r" (&lock->lock), "r" (1)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
	__asm__ __volatile__(
	: "r" (&lock->lock), "r" (0)
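
/*
 * Illustrative sketch only: the release path above is, at C level, a full
 * barrier (so everything done inside the critical section is visible before
 * the lock is seen as free), a plain store of 0, and a dsb_sev() to wake any
 * CPUs that the lock loop parked in WFE.  example_spin_unlock() is a
 * hypothetical name used only for this sketch.
 */
#if 0
static inline void example_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();		/* order critical-section accesses before the release */
	lock->lock = 0;		/* 0 == unlocked */
	dsb_sev();		/* wake waiters sitting in WFE */
}
#endif
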
/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
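
/*
 * Illustrative sketch only: taking the write lock amounts to a compare-and-swap
 * of the whole word from 0 to 0x80000000, i.e. it only succeeds when there are
 * no readers (low bits clear) and no other writer (bit 31 clear).
 * example_write_trylock() is a hypothetical helper, not part of this header.
 */
#if 0
static inline int example_write_trylock(arch_rwlock_t *rw)
{
	unsigned int old = 0;

	if (!__atomic_compare_exchange_n(&rw->lock, &old, 0x80000000, 0,
					 __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		return 0;	/* held for read or write */

	smp_mb();
	return 1;
}
#endif
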
static inline void arch_write_lock(arch_rwlock_t *rw)
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	strexeq	%0, %2, [%1]\n"
	: "r" (&rw->lock), "r" (0x80000000)

static inline int arch_write_trylock(arch_rwlock_t *rw)
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	strexeq	%0, %2, [%1]"
	: "r" (&rw->lock), "r" (0x80000000)

static inline void arch_write_unlock(arch_rwlock_t *rw)
	__asm__ __volatile__(
	: "r" (&rw->lock), "r" (0)

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
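
/*
 * Illustrative sketch only: the reader path increments the lock word and
 * treats a negative result (bit 31 set by a writer) as failure, retrying
 * until the increment sticks.  This mirrors the ldrex/adds/strexpl sequences
 * below; the hypothetical example_read_lock() uses GCC __atomic builtins
 * purely for clarity.
 */
#if 0
static inline void example_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	for (;;) {
		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
		if ((int)(old + 1) < 0) {
			cpu_relax();	/* a writer holds bit 31: spin and retry */
			continue;
		}
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			break;		/* we own the incremented reader count */
	}

	smp_mb();
}
#endif
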
static inline void arch_read_lock(arch_rwlock_t *rw)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	strexpl	%1, %0, [%2]\n"
"	rsbpls	%0, %1, #0\n"
	: "=&r" (tmp), "=&r" (tmp2)

/* Drop one reader; when the count reaches zero, dsb_sev() wakes any waiting writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	strex	%1, %0, [%2]\n"
	: "=&r" (tmp), "=&r" (tmp2)

/* tmp2 starts at 1 and is cleared by a successful strexpl, so success is (tmp2 == 0). */
static inline int arch_read_trylock(arch_rwlock_t *rw)
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
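
/*
 * Illustrative note: the rwlock word keeps the reader count in bits 0-30 and
 * the writer in bit 31, which is why the two *_can_lock() predicates reduce
 * to simple comparisons.  The example_* helpers below are hypothetical and
 * only restate those checks in open-coded form.
 */
#if 0
static inline int example_write_can_lock(arch_rwlock_t *rw)
{
	return rw->lock == 0;			/* no readers and no writer */
}

static inline int example_read_can_lock(arch_rwlock_t *rw)
{
	return !(rw->lock & 0x80000000);	/* fails only while a writer holds bit 31 */
}
#endif
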
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */