/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
9 #include <linux/types.h>
10 #include <linux/module.h>
11 #include <linux/spinlock.h>
12 #include <linux/init.h>
13 #include <linux/smp.h>
/* Number of in-line lock acquisition attempts before yielding the CPU. */
int spin_retry = 1000;
19 * spin_retry= parameter
21 static int __init spin_retry_setup(char *str)
23 spin_retry = simple_strtoul(str, &str, 0);
26 __setup("spin_retry=", spin_retry_setup);
28 void arch_spin_lock_wait(arch_spinlock_t *lp)
30 int count = spin_retry;
31 unsigned int cpu = ~smp_processor_id();
35 owner = lp->owner_cpu;
36 if (!owner || smp_vcpu_scheduled(~owner)) {
37 for (count = spin_retry; count > 0; count--) {
38 if (arch_spin_is_locked(lp))
40 if (_raw_compare_and_swap(&lp->owner_cpu, 0,
47 owner = lp->owner_cpu;
49 smp_yield_cpu(~owner);
50 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
54 EXPORT_SYMBOL(arch_spin_lock_wait);
56 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
58 int count = spin_retry;
59 unsigned int cpu = ~smp_processor_id();
62 local_irq_restore(flags);
64 owner = lp->owner_cpu;
65 if (!owner || smp_vcpu_scheduled(~owner)) {
66 for (count = spin_retry; count > 0; count--) {
67 if (arch_spin_is_locked(lp))
70 if (_raw_compare_and_swap(&lp->owner_cpu, 0,
73 local_irq_restore(flags);
78 owner = lp->owner_cpu;
80 smp_yield_cpu(~owner);
82 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
84 local_irq_restore(flags);
87 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
89 int arch_spin_trylock_retry(arch_spinlock_t *lp)
91 unsigned int cpu = ~smp_processor_id();
94 for (count = spin_retry; count > 0; count--) {
95 if (arch_spin_is_locked(lp))
97 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
102 EXPORT_SYMBOL(arch_spin_trylock_retry);
104 void arch_spin_relax(arch_spinlock_t *lock)
106 unsigned int cpu = lock->owner_cpu;
108 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
109 !smp_vcpu_scheduled(~cpu))
113 EXPORT_SYMBOL(arch_spin_relax);
115 void _raw_read_lock_wait(arch_rwlock_t *rw)
118 int count = spin_retry;
125 if (!arch_read_can_lock(rw))
127 old = rw->lock & 0x7fffffffU;
128 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
132 EXPORT_SYMBOL(_raw_read_lock_wait);
134 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
137 int count = spin_retry;
139 local_irq_restore(flags);
145 if (!arch_read_can_lock(rw))
147 old = rw->lock & 0x7fffffffU;
149 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
153 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
155 int _raw_read_trylock_retry(arch_rwlock_t *rw)
158 int count = spin_retry;
160 while (count-- > 0) {
161 if (!arch_read_can_lock(rw))
163 old = rw->lock & 0x7fffffffU;
164 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
169 EXPORT_SYMBOL(_raw_read_trylock_retry);
171 void _raw_write_lock_wait(arch_rwlock_t *rw)
173 int count = spin_retry;
180 if (!arch_write_can_lock(rw))
182 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
186 EXPORT_SYMBOL(_raw_write_lock_wait);
188 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
190 int count = spin_retry;
192 local_irq_restore(flags);
198 if (!arch_write_can_lock(rw))
201 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
205 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
207 int _raw_write_trylock_retry(arch_rwlock_t *rw)
209 int count = spin_retry;
211 while (count-- > 0) {
212 if (!arch_write_can_lock(rw))
214 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
219 EXPORT_SYMBOL(_raw_write_trylock_retry);