/*
 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#ifndef _PPC_RWSEM_H
#define _PPC_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        /* XXX this should be able to be an atomic_t  -- paulus */
        signed long             count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#if RWSEM_DEBUG
        int                     debug;
#endif
};
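
/*
 * The count encodes both the number of active lockers (the low 16
 * bits, RWSEM_ACTIVE_MASK) and whether anyone is waiting (the high
 * bits go negative).  Roughly, the states derived from the biases
 * above are:
 *
 *      0x00000000      unlocked, no waiters
 *      0x0000000N      N readers active, no waiters
 *      0xffff0001      one writer active (WAITING_BIAS + ACTIVE_BIAS)
 *      0xffff0000      no lockers active, tasks queued on wait_list
 *
 * This lets a single atomic operation on count both take the lock
 * and detect contention in the fast paths below.
 */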

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT      /* */
#endif

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) \
          __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
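
/*
 * Usage sketch (illustrative, not part of this header): a semaphore
 * may be initialised statically or at runtime, e.g.
 *
 *      static DECLARE_RWSEM(my_sem);   (* "my_sem" is a made-up name *)
 *
 * or:
 *
 *      static struct rw_semaphore my_sem;
 *      init_rwsem(&my_sem);
 *
 * Callers then use the generic down_read()/up_read() and
 * down_write()/up_write() wrappers from linux/rwsem.h, which invoke
 * the __down_*()/__up_*() fast paths defined below.
 */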

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
        sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
                smp_wmb();
        else
                rwsem_down_read_failed(sem);
}
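
/*
 * Fast-path reasoning, as a sketch: an uncontended __down_read()
 * moves count from 0 to 1, and additional readers keep it positive,
 * so a positive return from the atomic increment means the read
 * lock was granted.  A zero or negative result means a writer is
 * active or tasks are queued (RWSEM_WAITING_BIAS is in the count),
 * and we take the slow path in lib/rwsem.c.  The smp_wmb() on the
 * granted path is the barrier this implementation interposes
 * between acquiring the lock and the critical section.
 */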

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        smp_wmb();
                        return 1;
                }
        }
        return 0;
}
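
/*
 * The trylock samples count and, while no writer is active or
 * waiting (count >= 0), tries to install count + ACTIVE_READ_BIAS
 * with cmpxchg().  A failed cmpxchg() means another CPU changed
 * count in the meantime, so the loop re-samples and retries;
 * a negative sample means contention, and the trylock gives up.
 */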

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
                smp_wmb();
        else
                rwsem_down_write_failed(sem);
}
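
/*
 * A writer adds RWSEM_ACTIVE_WRITE_BIAS in a single step.  If the
 * result is exactly RWSEM_ACTIVE_WRITE_BIAS, the count was
 * previously RWSEM_UNLOCKED_VALUE: no readers, writers or waiters,
 * so the write lock is granted.  Any other result means contention,
 * and the slow path handles the queueing.
 */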

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
}
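
/*
 * The write trylock succeeds only from the fully idle state: the
 * cmpxchg() swaps RWSEM_UNLOCKED_VALUE for RWSEM_ACTIVE_WRITE_BIAS
 * and leaves count untouched otherwise.  Note that the smp_wmb()
 * is executed whether or not the lock was obtained.
 */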

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}
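
/*
 * Read-unlock wake-up test: tmp < -1 means RWSEM_WAITING_BIAS is in
 * the count (tasks are queued), and (tmp & RWSEM_ACTIVE_MASK) == 0
 * means this was the last active locker, so this task must call
 * rwsem_wake().  Worked example: one reader plus queued waiters is
 * count = 0xffff0001; the decrement leaves 0xffff0000, which
 * satisfies both tests.
 */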

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        smp_wmb();
        if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0)
                rwsem_wake(sem);
}
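
/*
 * Write-unlock: subtracting the full write bias returns count to
 * zero when nobody is queued.  A negative result means
 * RWSEM_WAITING_BIAS contributions remain, i.e. tasks are sleeping
 * on the semaphore, so rwsem_wake() passes the lock on.  The
 * smp_wmb() beforehand orders the critical section's stores before
 * the releasing update.
 */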

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}
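
/*
 * Downgrade arithmetic: the holder owns RWSEM_ACTIVE_WRITE_BIAS,
 * which is RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS; adding back
 * -RWSEM_WAITING_BIAS leaves RWSEM_ACTIVE_READ_BIAS, converting the
 * write hold into a read hold in one atomic step.  A negative
 * result means other tasks are still queued, and
 * rwsem_downgrade_wake() can admit any waiting readers.
 */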

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
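
/*
 * rwsem_atomic_add() and rwsem_atomic_update() are the helpers the
 * generic slow path in lib/rwsem.c uses to adjust count while
 * queueing and waking tasks; the smp_mb() here supplies a full
 * barrier ahead of the exchange-and-add.
 */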

#endif /* __KERNEL__ */
#endif /* _PPC_RWSEM_H */