[SPARC64]: Fix sparse warning wrt. fault_in_user_windows.
[pandora-kernel.git] / include/asm-sparc64/system.h
index 64891cb..ed91a5d 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -1,4 +1,3 @@
-/* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */
 #ifndef __SPARC64_SYSTEM_H
 #define __SPARC64_SYSTEM_H
 
@@ -9,6 +8,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/irqflags.h>
+#include <asm-generic/cmpxchg-local.h>
 
 /*
  * Sparc (general) CPU types
@@ -117,6 +117,7 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 extern void sun_do_break(void);
 extern int stop_a_enabled;
 
+extern void fault_in_user_windows(void);
 extern void synchronize_user_stack(void);
 
 extern void __flushw_user(void);
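The new extern prototype for fault_in_user_windows() is what the subject line refers to: sparse warns about any non-static function that is defined with no declaration in scope ("symbol 'fault_in_user_windows' was not declared. Should it be static?"), and declaring it here, in a header the defining file pulls in, gives the definition a matching prototype. A minimal sketch of that pattern, using hypothetical file and symbol names rather than the real kernel sources:

/* demo.h */
#ifndef _DEMO_H
#define _DEMO_H

/*
 * Without a declaration like this in scope, sparse reports for the
 * definition below:
 *     warning: symbol 'demo_fault_in' was not declared. Should it be static?
 */
extern void demo_fault_in(void);

#endif

/* demo.c */
#include "demo.h"

void demo_fault_in(void)	/* definition now matches a visible prototype */
{
	/* ... */
}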
@@ -141,7 +142,6 @@ do {                                                \
         * not preserve it's value.  Hairy, but it lets us remove 2 loads
         * and 2 stores in this critical code path.  -DaveM
         */
-#define EXTRA_CLOBBER ,"%l1"
 #define switch_to(prev, next, last)                                    \
 do {   if (test_thread_flag(TIF_PERFCTR)) {                            \
                unsigned long __tmp;                                    \
@@ -164,33 +164,36 @@ do {      if (test_thread_flag(TIF_PERFCTR)) {                            \
        "stx    %%i6, [%%sp + 2047 + 0x70]\n\t"                         \
        "stx    %%i7, [%%sp + 2047 + 0x78]\n\t"                         \
        "rdpr   %%wstate, %%o5\n\t"                                     \
-       "stx    %%o6, [%%g6 + %3]\n\t"                                  \
-       "stb    %%o5, [%%g6 + %2]\n\t"                                  \
-       "rdpr   %%cwp, %%o5\n\t"                                        \
+       "stx    %%o6, [%%g6 + %6]\n\t"                                  \
        "stb    %%o5, [%%g6 + %5]\n\t"                                  \
-       "mov    %1, %%g6\n\t"                                           \
-       "ldub   [%1 + %5], %%g1\n\t"                                    \
+       "rdpr   %%cwp, %%o5\n\t"                                        \
+       "stb    %%o5, [%%g6 + %8]\n\t"                                  \
+       "mov    %4, %%g6\n\t"                                           \
+       "ldub   [%4 + %8], %%g1\n\t"                                    \
        "wrpr   %%g1, %%cwp\n\t"                                        \
-       "ldx    [%%g6 + %3], %%o6\n\t"                                  \
-       "ldub   [%%g6 + %2], %%o5\n\t"                                  \
-       "ldub   [%%g6 + %4], %%o7\n\t"                                  \
+       "ldx    [%%g6 + %6], %%o6\n\t"                                  \
+       "ldub   [%%g6 + %5], %%o5\n\t"                                  \
+       "ldub   [%%g6 + %7], %%o7\n\t"                                  \
        "wrpr   %%o5, 0x0, %%wstate\n\t"                                \
        "ldx    [%%sp + 2047 + 0x70], %%i6\n\t"                         \
        "ldx    [%%sp + 2047 + 0x78], %%i7\n\t"                         \
-       "ldx    [%%g6 + %6], %%g4\n\t"                                  \
+       "ldx    [%%g6 + %9], %%g4\n\t"                                  \
        "brz,pt %%o7, 1f\n\t"                                           \
        " mov   %%g7, %0\n\t"                                           \
-       "b,a ret_from_syscall\n\t"                                      \
+       "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
+       "jmpl   %%g1 + %%lo(ret_from_syscall), %%g0\n\t"                \
+       " nop\n\t"                                                      \
        "1:\n\t"                                                        \
-       : "=&r" (last)                                                  \
+       : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
+         "=r" (__local_per_cpu_offset)                                 \
        : "0" (task_thread_info(next)),                                 \
          "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),            \
          "i" (TI_CWP), "i" (TI_TASK)                                   \
        : "cc",                                                         \
                "g1", "g2", "g3",                   "g7",               \
-                     "l2", "l3", "l4", "l5", "l6", "l7",               \
+               "l1", "l2", "l3", "l4", "l5", "l6", "l7",               \
          "i0", "i1", "i2", "i3", "i4", "i5",                           \
-         "o0", "o1", "o2", "o3", "o4", "o5",       "o7" EXTRA_CLOBBER);\
+         "o0", "o1", "o2", "o3", "o4", "o5",       "o7");              \
        /* If you fuck with this, update ret_from_syscall code too. */  \
        if (test_thread_flag(TIF_PERFCTR)) {                            \
                write_pcr(current_thread_info()->pcr_reg);              \
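The operand renumbering in the hunk above (%1..%6 becoming %4..%9) follows from the new constraint lists: GCC numbers extended-asm operands sequentially, outputs first, so once current, current_thread_info_reg and __local_per_cpu_offset join last as outputs %0..%3 (telling GCC the asm leaves new values in those register-backed variables), the task_thread_info(next) input and the TI_* constants shift to %4..%9; the EXTRA_CLOBBER macro is likewise dropped in favour of naming "l1" directly in the clobber list. The exit path also changes: the PC-relative "b,a ret_from_syscall" (22-bit branch displacement) becomes a sethi/jmpl pair that can reach the symbol at any 32-bit address. A minimal, architecture-neutral sketch of the numbering rule only, with a hypothetical helper and an empty asm template (the constraints, not the instructions, are what matter here):

static unsigned long operand_numbering_demo(unsigned long src)
{
	unsigned long a, b, c, d;

	asm volatile(""			/* no instructions: only numbering matters */
		     : "=r" (a),	/* %0 */
		       "=r" (b),	/* %1 */
		       "=r" (c),	/* %2 */
		       "=r" (d)		/* %3 */
		     : "0" (src));	/* first input is %4, tied to %0 by "0" */

	(void)b; (void)c; (void)d;	/* these outputs are simply left undefined */
	return a;			/* a == src via the matching constraint */
}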
@@ -240,7 +243,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 
 extern void __xchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
+static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                       int size)
 {
        switch (size) {
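This hunk and the similar ones below only replace the old __inline__ spelling with plain inline; the dispatch logic is untouched, and xchg() still routes through __xchg() on sizeof(*(ptr)), picking xchg32() or xchg64(). A usage sketch, with a hypothetical flag word that is not part of this patch:

#include <asm/system.h>

static unsigned long demo_pending;	/* hypothetical flag word */

static int demo_test_and_clear(void)
{
	/*
	 * Atomically fetch the old value and store 0; sizeof(unsigned long)
	 * is 8, so this goes through xchg64() on sparc64.
	 */
	return xchg(&demo_pending, 0UL) != 0;
}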
@@ -263,7 +266,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
 
 #define __HAVE_ARCH_CMPXCHG 1
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
        __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
@@ -276,7 +279,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
        return new;
 }
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
        __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
@@ -293,7 +296,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
        switch (size) {
@@ -314,6 +317,34 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
                                    (unsigned long)_n_, sizeof(*(ptr))); \
   })
 
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                                     unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 4:
+       case 8: return __cmpxchg(ptr, old, new, size);
+       default:
+               return __cmpxchg_local_generic(ptr, old, new, size);
+       }
+
+       return old;
+}
+
+#define cmpxchg_local(ptr, o, n)                                       \
+       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                       (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n)                                     \
+  ({                                                                   \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       cmpxchg_local((ptr), (o), (n));                                 \
+  })
+
 #endif /* !(__ASSEMBLY__) */
 
 #define arch_align_stack(x) (x)
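The last hunk wires sparc64 into the generic cmpxchg-local framework from <asm-generic/cmpxchg-local.h> (included near the top of the patch): the 4- and 8-byte cases reuse the fully SMP-safe __cmpxchg(), so on this architecture cmpxchg_local() is mostly about making the API available, as the new comment says, while any other size falls back to __cmpxchg_local_generic(), which is only atomic with respect to the current CPU. A hedged usage sketch of how callers typically use the primitive, with a hypothetical per-CPU counter that is not part of this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/system.h>

static DEFINE_PER_CPU(unsigned long, demo_events);	/* hypothetical counter */

static void demo_count_event(void)
{
	unsigned long *p, old;

	preempt_disable();			/* stay on one CPU for the update */
	p = &__get_cpu_var(demo_events);
	do {
		old = *p;
	} while (cmpxchg_local(p, old, old + 1) != old);
	preempt_enable();
}

cmpxchg_local() returns the value actually found in *p, so the loop retries only if something else on this CPU (an interrupt handler, for instance) updated the counter between the load and the compare-and-swap; no cross-CPU ordering is implied or needed here.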