/* arch/x86/include/asm/cmpxchg_32.h */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers; otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
        u32 low  = value;
        u32 high = value >> 32;
        u64 prev = *ptr;

        asm volatile("\n1:\t"
                     LOCK_PREFIX "cmpxchg8b %0\n\t"
                     "jnz 1b"
                     : "=m" (*ptr), "+A" (prev)
                     : "b" (low), "c" (high)
                     : "memory");
}
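
/*
 * C-level sketch of the retry loop above (illustrative only, not part
 * of the original header).  On a miss, CMPXCHG8B refreshes EDX:EAX
 * ("prev") with the value it found in memory, so each retry compares
 * against fresh state and the store eventually lands.  Written in
 * terms of __cmpxchg64(), defined further down:
 *
 *	u64 prev = *ptr;		// preload: likely to match
 *	u64 seen;
 *	while ((seen = __cmpxchg64(ptr, prev, value)) != prev)
 *		prev = seen;		// the "new previous" value
 */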

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)                                            \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
                                         (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)                                      \
        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
                                               (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;
        asm volatile(LOCK_PREFIX "cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;
        asm volatile("cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}
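
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): a lock-free 64-bit add built from the compare-and-exchange
 * above.  __cmpxchg64() returns the value it found in memory, so
 * comparing that return against "old" tells us whether our update won
 * the race.
 *
 *	static inline u64 add64_return(volatile u64 *p, u64 delta)
 *	{
 *		u64 old, seen;
 *
 *		do {
 *			old  = *p;
 *			seen = __cmpxchg64(p, old, old + delta);
 *		} while (seen != old);		// lost a race; retry
 *		return old + delta;
 *	}
 */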

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate cmpxchg on the 80386 CPU. For that purpose we define a
 * function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                        unsigned long new, int size)
{
        switch (size) {
        case 1:
                return cmpxchg_386_u8(ptr, old, new);
        case 2:
                return cmpxchg_386_u16(ptr, old, new);
        case 4:
                return cmpxchg_386_u32(ptr, old, new);
        }
        return old;
}
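
/*
 * Sketch of one of the out-of-line fallbacks (the real bodies live
 * elsewhere in the tree; this is illustrative only).  The 80386 has no
 * CMPXCHG instruction, but such machines are strictly UP, so disabling
 * interrupts around a plain compare-and-store is sufficient:
 *
 *	unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
 *	{
 *		u8 prev;
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		prev = *(u8 *)ptr;
 *		if (prev == old)
 *			*(u8 *)ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */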

#define cmpxchg(ptr, o, n)                                              \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg((ptr),            \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),          \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        __ret;                                                          \
})
#define cmpxchg_local(ptr, o, n)                                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),      \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),          \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        __ret;                                                          \
})
#endif
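
/*
 * Typical use of cmpxchg() (illustrative, hypothetical caller): claim
 * a flag word exactly once.  Only the caller that observes the old
 * value wins; everyone else sees the updated word and backs off.
 *
 *	static unsigned long claimed;
 *
 *	static int try_claim(void)
 *	{
 *		return cmpxchg(&claimed, 0UL, 1UL) == 0UL;
 *	}
 */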

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486. It may be
 * necessary to simulate cmpxchg8b on those CPUs.
 */

#define cmpxchg64(ptr, o, n)                                    \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io(LOCK_PREFIX_HERE                         \
                        "call cmpxchg8b_emu",                   \
                        "lock; cmpxchg8b (%%esi)",              \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new>>32))          \
                       : "memory");                             \
        __ret; })

#define cmpxchg64_local(ptr, o, n)                              \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io("call cmpxchg8b_emu",                    \
                       "cmpxchg8b (%%esi)",                     \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new>>32))          \
                       : "memory");                             \
        __ret; })

#endif
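
/*
 * Illustrative C-level sketch of what the out-of-line emulator has to
 * do (the real cmpxchg8b_emu is asm; this is not its actual body).
 * Pre-CX8 parts are uniprocessor, so briefly disabling interrupts
 * makes the compare-and-store atomic.  The register constraints above
 * double as its calling convention: pointer in %esi, old value in
 * %edx:%eax, new value in %ecx:%ebx, observed value back in %edx:%eax.
 *
 *	u64 cmpxchg8b_emu_c(volatile u64 *ptr, u64 old, u64 new)
 *	{
 *		u64 prev;
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		prev = *ptr;
 *		if (prev == old)
 *			*ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */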

#define cmpxchg8b(ptr, o1, o2, n1, n2)                          \
({                                                              \
        char __ret;                                             \
        __typeof__(o2) __dummy;                                 \
        __typeof__(*(ptr)) __old1 = (o1);                       \
        __typeof__(o2) __old2 = (o2);                           \
        __typeof__(*(ptr)) __new1 = (n1);                       \
        __typeof__(o2) __new2 = (n2);                           \
        asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1"        \
                       : "=d" (__dummy), "=a" (__ret), "+m" (*ptr)\
                       : "a" (__old1), "d" (__old2),            \
                         "b" (__new1), "c" (__new2)             \
                       : "memory");                             \
        __ret; })

#define cmpxchg8b_local(ptr, o1, o2, n1, n2)                    \
({                                                              \
        char __ret;                                             \
        __typeof__(o2) __dummy;                                 \
        __typeof__(*(ptr)) __old1 = (o1);                       \
        __typeof__(o2) __old2 = (o2);                           \
        __typeof__(*(ptr)) __new1 = (n1);                       \
        __typeof__(o2) __new2 = (n2);                           \
        asm volatile("cmpxchg8b %2; setz %1"                    \
                       : "=d" (__dummy), "=a" (__ret), "+m" (*ptr)\
                       : "a" (__old1), "d" (__old2),            \
                         "b" (__new1), "c" (__new2)             \
                       : "memory");                             \
        __ret; })

#define cmpxchg_double(ptr, o1, o2, n1, n2)                             \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
        VM_BUG_ON((unsigned long)(ptr) % 8);                            \
        cmpxchg8b((ptr), (o1), (o2), (n1), (n2));                       \
})
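
/*
 * Illustrative use (hypothetical types, not part of the original
 * header): advance a {pointer, sequence} pair in one shot so a reused
 * pointer cannot be mistaken for the old one (the classic ABA
 * problem).  The pair must be 8-byte aligned; the first word maps to
 * o1/n1 (eax/ebx), the second to o2/n2 (edx/ecx).
 *
 *	struct tagged {
 *		unsigned long ptr;	// low word
 *		unsigned long seq;	// high word
 *	} __attribute__((aligned(8)));
 *
 *	static int replace_tagged(struct tagged *t,
 *				  unsigned long oldp, unsigned long oldseq,
 *				  unsigned long newp)
 *	{
 *		return cmpxchg_double(&t->ptr, oldp, oldseq,
 *				      newp, oldseq + 1);
 *	}
 */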

#define cmpxchg_double_local(ptr, o1, o2, n1, n2)                       \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
        VM_BUG_ON((unsigned long)(ptr) % 8);                            \
        cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2));                 \
})

#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */