arch/x86/include/asm/cmpxchg_64.h
#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */
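
/*
 * On x86-64 an aligned 64-bit store is atomic, so a plain assignment
 * is all set_64bit() needs; no lock-prefixed instruction is required.
 */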
static inline void set_64bit(volatile u64 *ptr, u64 val)
{
        *ptr = val;
}

extern void __xchg_wrong_size(void);
extern void __cmpxchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)                                            \
({                                                                      \
        __typeof__(*(ptr)) __x = (x);                                   \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                volatile u8 *__ptr = (volatile u8 *)(ptr);              \
                asm volatile("xchgb %0,%1"                              \
                             : "=q" (__x), "+m" (*__ptr)                \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                volatile u16 *__ptr = (volatile u16 *)(ptr);            \
                asm volatile("xchgw %0,%1"                              \
                             : "=r" (__x), "+m" (*__ptr)                \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                volatile u32 *__ptr = (volatile u32 *)(ptr);            \
                asm volatile("xchgl %0,%1"                              \
                             : "=r" (__x), "+m" (*__ptr)                \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                volatile u64 *__ptr = (volatile u64 *)(ptr);            \
                asm volatile("xchgq %0,%1"                              \
                             : "=r" (__x), "+m" (*__ptr)                \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __xchg_wrong_size();                                    \
        }                                                               \
        __x;                                                            \
})

#define xchg(ptr, v)                                                    \
        __xchg((v), (ptr), sizeof(*(ptr)))
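
/*
 * Usage sketch (illustrative only; "shared_flag" and the helper below
 * are hypothetical, not part of this header): xchg() publishes a new
 * value and returns the previous one in a single atomic step, so the
 * caller can tell whether it was the one to perform the transition.
 *
 *	unsigned long old = xchg(&shared_flag, 1UL);
 *	if (old == 0)
 *		do_first_time_setup();	// we won the race
 */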

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                volatile u8 *__ptr = (volatile u8 *)(ptr);              \
                asm volatile(lock "cmpxchgb %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "q" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                volatile u16 *__ptr = (volatile u16 *)(ptr);            \
                asm volatile(lock "cmpxchgw %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                volatile u32 *__ptr = (volatile u32 *)(ptr);            \
                asm volatile(lock "cmpxchgl %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                volatile u64 *__ptr = (volatile u64 *)(ptr);            \
                asm volatile(lock "cmpxchgq %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __ret;                                                          \
})

#define __cmpxchg(ptr, old, new, size)                                  \
        __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
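
/*
 * __sync_cmpxchg always emits a literal "lock" prefix: LOCK_PREFIX can
 * be patched out on non-SMP kernels, which is not safe when the memory
 * is shared with an agent outside this kernel image (e.g. a
 * hypervisor), since the machine may be SMP even if the guest is UP.
 * __cmpxchg_local omits the prefix entirely and is only safe for data
 * that no other CPU can touch concurrently, such as per-CPU state.
 */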
#define __sync_cmpxchg(ptr, old, new, size)                             \
        __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)                            \
        __raw_cmpxchg((ptr), (old), (new), (size), "")

#define cmpxchg(ptr, old, new)                                          \
        __cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)                                     \
        __sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)                                    \
        __cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
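
/*
 * Typical use is a compare-and-swap retry loop: re-read the current
 * value whenever another CPU changed it first.  A minimal sketch of a
 * saturating increment ("v" and "LIMIT" are hypothetical):
 *
 *	u32 cur = *v;
 *	for (;;) {
 *		u32 old = cmpxchg(v, cur, cur < LIMIT ? cur + 1 : cur);
 *		if (old == cur)
 *			break;		// our value was stored
 *		cur = old;		// lost the race; retry
 *	}
 */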

#define cmpxchg64(ptr, o, n)                                            \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg((ptr), (o), (n));                                       \
})

#define cmpxchg64_local(ptr, o, n)                                      \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
})
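
/*
 * On x86-64, cmpxchg()/cmpxchg_local() above already handle 8-byte
 * operands natively, so cmpxchg64() and cmpxchg64_local() only add a
 * compile-time check that the operand really is 64 bits wide.  Sketch
 * (the u64 field "seq" and its holder are hypothetical):
 *
 *	u64 seen = cmpxchg64(&hdr->seq, old_seq, old_seq + 1);
 */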

#endif /* _ASM_X86_CMPXCHG_64_H */