Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ee1931b..ffb9bb6 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -8,14 +8,50 @@
  *       you need to test for the feature in boot_cpu_data.
  */
 
-#define xchg(ptr, v)                                                   \
-       ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+extern void __xchg_wrong_size(void);
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *       but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
 
 struct __xchg_dummy {
        unsigned long a[100];
 };
 #define __xg(x) ((struct __xchg_dummy *)(x))
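The __xg() cast is the old inline-asm trick for memory constraints: casting ptr to a pointer to a 400-byte dummy struct makes the "m" operand tell GCC that the asm may read or write a large object behind the pointer, so the compiler cannot keep *ptr cached in a register or reorder accesses around the exchange.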
 
+#define __xchg(x, ptr, size)                                           \
+({                                                                     \
+       __typeof__(*(ptr)) __x = (x);                                   \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               asm volatile("xchgb %b0,%1"                             \
+                            : "=q" (__x)                               \
+                            : "m" (*__xg(ptr)), "0" (__x)              \
+                            : "memory");                               \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("xchgw %w0,%1"                             \
+                            : "=r" (__x)                               \
+                            : "m" (*__xg(ptr)), "0" (__x)              \
+                            : "memory");                               \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("xchgl %0,%1"                              \
+                            : "=r" (__x)                               \
+                            : "m" (*__xg(ptr)), "0" (__x)              \
+                            : "memory");                               \
+               break;                                                  \
+       default:                                                        \
+               __xchg_wrong_size();                                    \
+       }                                                               \
+       __x;                                                            \
+})
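Note that __xchg_wrong_size() is declared extern but deliberately never defined: any call with an unsupported operand size survives to link time, where it fails loudly instead of compiling into wrong code. A hypothetical misuse on a 32-bit build:

    long long v64;
    xchg(&v64, 1LL);    /* size 8 hits the default case; the linker
                           reports an undefined reference to
                           __xchg_wrong_size */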
+
+#define xchg(ptr, v)                                                   \
+       __xchg((v), (ptr), sizeof(*(ptr)))
+
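Since XCHG with a memory operand locks the bus on its own, the primitive above is already enough to build a test-and-set lock. A minimal userspace sketch in the same constraint style (hypothetical tas_lock()/tas_acquire() names):

    static inline unsigned long tas_lock(volatile unsigned long *lock)
    {
            unsigned long x = 1;
            asm volatile("xchgl %0,%1"      /* atomic swap, implied lock */
                         : "=r" (x)
                         : "m" (*lock), "0" (x)
                         : "memory");
            return x;       /* old value: 0 means we took the lock */
    }

    static void tas_acquire(volatile unsigned long *lock)
    {
            while (tas_lock(lock))
                    ;       /* spin until the old value reads back 0 */
    }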
 /*
  * The semantics of CMPXCHG8B are a bit strange, which is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr,
                       (unsigned int)((value) >> 32))                   \
         : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *       but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-                                  int size)
-{
-       switch (size) {
-       case 1:
-               asm volatile("xchgb %b0,%1"
-                            : "=q" (x)
-                            : "m" (*__xg(ptr)), "0" (x)
-                            : "memory");
-               break;
-       case 2:
-               asm volatile("xchgw %w0,%1"
-                            : "=r" (x)
-                            : "m" (*__xg(ptr)), "0" (x)
-                            : "memory");
-               break;
-       case 4:
-               asm volatile("xchgl %0,%1"
-                            : "=r" (x)
-                            : "m" (*__xg(ptr)), "0" (x)
-                            : "memory");
-               break;
-       }
-       return x;
-}
+extern void __cmpxchg_wrong_size(void);
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  */
+#define __raw_cmpxchg(ptr, old, new, size, lock)                       \
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __typeof__(*(ptr)) __old = (old);                               \
+       __typeof__(*(ptr)) __new = (new);                               \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               asm volatile(lock "cmpxchgb %b1,%2"                     \
+                            : "=a"(__ret)                              \
+                            : "q"(__new), "m"(*__xg(ptr)), "0"(__old)  \
+                            : "memory");                               \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile(lock "cmpxchgw %w1,%2"                     \
+                            : "=a"(__ret)                              \
+                            : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
+                            : "memory");                               \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile(lock "cmpxchgl %1,%2"                      \
+                            : "=a"(__ret)                              \
+                            : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
+                            : "memory");                               \
+               break;                                                  \
+       default:                                                        \
+               __cmpxchg_wrong_size();                                 \
+       }                                                               \
+       __ret;                                                          \
+})
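The constraints follow CMPXCHG's fixed register usage: the comparand must sit in EAX and the instruction deposits the old memory value back in EAX, which is what "=a" (__ret) tied to "0" (__old) expresses. The byte case uses "q" instead of "r" because only EAX, EBX, ECX and EDX have byte subregisters on 32-bit x86; the lock argument is pasted verbatim in front of the mnemonic by the three wrappers below.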
+
+#define __cmpxchg(ptr, old, new, size)                                 \
+       __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size)                            \
+       __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size)                           \
+       __raw_cmpxchg((ptr), (old), (new), (size), "")
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n)                                             \
-       ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
-                                      (unsigned long)(n),              \
-                                      sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n)                                                \
-       ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),  \
-                                           (unsigned long)(n),         \
-                                           sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n)                                       \
-       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-                                            (unsigned long)(n),        \
-                                            sizeof(*(ptr))))
+
+#define cmpxchg(ptr, old, new)                                         \
+       __cmpxchg((ptr), (old), (new), sizeof(*(ptr)))
+
+#define sync_cmpxchg(ptr, old, new)                                    \
+       __sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))
+
+#define cmpxchg_local(ptr, old, new)                                   \
+       __cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
 #endif
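The canonical calling pattern (assuming CONFIG_X86_CMPXCHG) is a read-modify-retry loop, with success detected by comparing the return value against the expected old value. A minimal sketch with a hypothetical atomic_or_u32() helper:

    static inline void atomic_or_u32(volatile unsigned int *p, unsigned int bits)
    {
            unsigned int old, new;

            do {
                    old = *p;               /* snapshot the current value */
                    new = old | bits;
            } while (cmpxchg(p, old, new) != old);  /* retry if we raced */
    }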
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                               (unsigned long long)(n)))
 #endif
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                     unsigned long new, int size)
-{
-       unsigned long prev;
-       switch (size) {
-       case 1:
-               asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
-                            : "=a"(prev)
-                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       case 2:
-               asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
-                            : "=a"(prev)
-                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       case 4:
-               asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
-                            : "=a"(prev)
-                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       }
-       return old;
-}
-
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-                                          unsigned long old,
-                                          unsigned long new, int size)
-{
-       unsigned long prev;
-       switch (size) {
-       case 1:
-               asm volatile("lock; cmpxchgb %b1,%2"
-                            : "=a"(prev)
-                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       case 2:
-               asm volatile("lock; cmpxchgw %w1,%2"
-                            : "=a"(prev)
-                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       case 4:
-               asm volatile("lock; cmpxchgl %1,%2"
-                            : "=a"(prev)
-                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       }
-       return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-                                           unsigned long old,
-                                           unsigned long new, int size)
-{
-       unsigned long prev;
-       switch (size) {
-       case 1:
-               asm volatile("cmpxchgb %b1,%2"
-                            : "=a"(prev)
-                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       case 2:
-               asm volatile("cmpxchgw %w1,%2"
-                            : "=a"(prev)
-                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       case 4:
-               asm volatile("cmpxchgl %1,%2"
-                            : "=a"(prev)
-                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                            : "memory");
-               return prev;
-       }
-       return old;
-}
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr,
                                             unsigned long long old,
                                             unsigned long long new)