#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
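
/*
 * Illustration only (roughly what ALT_SMP("sev", "nop") expands to, not a
 * verbatim listing): the SMP instruction is emitted inline, and its address
 * together with the UP replacement is recorded in .alt.smp.init so the boot
 * code can patch the site when it finds itself running on a uniprocessor:
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 */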

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif
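
/*
 * Sketch of what the explicit IT buys us (assembler output shown for
 * illustration, not verbatim):
 *
 *	ARM state:	wfene		@ one 4-byte conditional wfe
 *	Thumb-2:	it	ne	@ 16-bit IT
 *			wfene.n		@ 16-bit wfe, 4 bytes in total
 *
 * Either way the site is exactly 4 bytes, so the SMP_ON_UP fixup can
 * overwrite it with a single nop (nop.w on Thumb-2) on UP systems.
 */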

/*
 * Complete any prior stores, then signal an event so that CPUs sleeping
 * in WFE in the lock loops below wake up and re-examine the lock word.
 */
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"	/* ARMv6 CP15 data synchronization barrier */
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
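
/*
 * Roughly equivalent C for the locking sequence (illustrative only; the
 * real code below must use ldrex/strex so that the exclusive monitor, WFE
 * and the explicit barriers behave as described above):
 *
 *	do {
 *		while (lock->lock != 0)
 *			;				// wait (WFE) while it looks taken
 *	} while (!try_store_exclusive(&lock->lock, 1));	// strexeq can still fail
 *	smp_mb();					// order the critical section after the acquire
 *
 * try_store_exclusive() is a made-up stand-in for the ldrex/strexeq pair,
 * not a real kernel helper.
 */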

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
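
/*
 * Illustrative view of the rwlock word (a sketch, not a definition the code
 * relies on):
 *
 *	bit 31		set (0x80000000) while a writer holds the lock
 *	bits 30..0	count of currently active readers
 *
 * so a value of 0 means completely free (what arch_write_can_lock() tests)
 * and any value below 0x80000000 means no writer (arch_read_can_lock()).
 */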

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
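
/*
 * Illustrative C for the reader fast path (a sketch only; the real code
 * relies on ldrex/strexpl and the condition flags set by "adds"):
 *
 *	do {
 *		old = rw->lock;
 *		if ((long)(old + 1) < 0)	// bit 31 set: writer active, wait (WFE) and retry
 *			continue;
 *	} while (!try_store_exclusive(&rw->lock, old + 1));	// strexpl can fail
 *	smp_mb();
 *
 * try_store_exclusive() is the same made-up stand-in used in the spinlock
 * sketch earlier in this file.
 */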

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	/* tmp2 stays 1 unless the conditional strexpl runs and succeeds */
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */