arch/x86/include/asm/cmpxchg.h

#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

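/*
 * Illustrative sketch, not part of the original header: on a 32-bit
 * kernel no case matches a 64-bit operand, so the default branch of
 * the size switch in the macros below calls one of the extern
 * functions declared above.  Those functions are never defined
 * anywhere, so the build fails at compile time ("Bad argument size
 * for xchg") when __compiletime_error() is available, or at link
 * time otherwise.  "val" is a hypothetical variable:
 *
 *	u64 val = 0;
 *	xchg(&val, 1ULL);	// 32-bit build: __xchg_wrong_size()
 */
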
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile("xchgb %0,%1"				\
			     : "=q" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile("xchgw %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile("xchgq %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*(ptr)))

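/*
 * Usage sketch (illustrative, not part of the original header):
 * xchg() atomically stores the new value and returns the previous
 * contents of *ptr; the implied lock of the xchg instruction makes it
 * a full barrier.  "my_lock_word" is a hypothetical variable:
 *
 *	unsigned long prev;
 *
 *	prev = xchg(&my_lock_word, 1UL);
 *	if (prev == 0)
 *		;	// the word was clear before we set it
 */
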
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

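/*
 * The three wrappers below differ only in the lock prefix passed to
 * __raw_cmpxchg(), mirroring the xadd() variants further down:
 *
 * __cmpxchg() is locked when multiple CPUs are online
 * __sync_cmpxchg() is always locked
 * __cmpxchg_local() is never locked
 */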
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include "cmpxchg_32.h"
#else
# include "cmpxchg_64.h"
#endif

#ifdef __HAVE_ARCH_CMPXCHG
#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
#endif

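/*
 * Usage sketch (illustrative only; "my_counter" is a hypothetical
 * variable): cmpxchg() returns the value that was in *ptr before the
 * operation, so success is detected by comparing the return value
 * with "old".  A typical lock-free update retries until the compare
 * succeeds:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = ACCESS_ONCE(my_counter);
 *		new = old + 1;
 *	} while (cmpxchg(&my_counter, old, new) != old);
 */
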
#define __xadd(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "xaddb %b0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "xaddw %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "xaddl %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "xaddq %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__xadd_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")

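/*
 * Usage sketch (illustrative only; "my_count" is a hypothetical
 * variable): xadd() is useful when the caller needs the value from
 * before the addition, e.g. to hand out sequence numbers:
 *
 *	unsigned int my_ticket;
 *
 *	my_ticket = xadd(&my_count, 1);	// value of my_count before the add
 */
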
#endif	/* ASM_X86_CMPXCHG_H */