Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f..6337ade 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
        .align          32
        .endm
 
-       /* The following are simple 32 vs 64 bit instruction
-        * abstractions for the macros */
-       .macro          EXTR    reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-       extrd,u         \reg1,32+(\start),\length,\reg2
-#else
-       extrw,u         \reg1,\start,\length,\reg2
-#endif
-       .endm
-
-       .macro          DEP     reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-       depd            \reg1,32+(\start),\length,\reg2
-#else
-       depw            \reg1,\start,\length,\reg2
-#endif
-       .endm
-
-       .macro          DEPI    val,start,length,reg
-#ifdef CONFIG_64BIT
-       depdi           \val,32+(\start),\length,\reg
-#else
-       depwi           \val,\start,\length,\reg
-#endif
-       .endm
-
        /* In LP64, the space contains part of the upper 32 bits of the
         * fault.  We have to extract this and place it in the va,
         * zeroing the corresponding bits in the space register
         */
        .macro          L2_ptep pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-       EXTR            \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+       extru           \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-       EXTR            \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+       extru           \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-       DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+       dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        copy            %r0,\pte
        ldw,s           \index(\pmd),\pmd
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
-       DEP             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+       dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        copy            \pmd,%r9
        SHLREG          %r9,PxD_VALUE_SHIFT,\pmd
-       EXTR            \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-       DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+       extru           \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+       dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
        LDREG           %r0(\pmd),\pte          /* pmd is now pte */
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
        depdi           0,31,32,\tmp
 #endif
        copy            \va,\tmp1
-       DEPI            0,31,23,\tmp1
+       depi            0,31,23,\tmp1
        cmpb,COND(<>),n \tmp,\tmp1,\fault
        ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
        depd,z          \prot,8,7,\prot
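
The hunk above drops the EXTR/DEP/DEPI wrappers in favour of the plain 32-bit
extru/dep/depi forms. PA-RISC numbers bit positions from the most-significant
end (bit 0 is the MSB), and extru/dep take the position of the field's
rightmost bit plus a length, which is why the operands read as 31-SHIFT. A
minimal C model of the 32-bit forms, with function names of our own choosing
(not kernel helpers):

#include <stdint.h>

/* Models "extru r,p,len,t": extract the len-bit field whose rightmost
 * bit sits at big-endian position p, zero-extended into the result. */
static inline uint32_t model_extru(uint32_t r, unsigned p, unsigned len)
{
        uint32_t mask = (len < 32) ? ((1u << len) - 1) : ~0u;
        return (r >> (31 - p)) & mask;
}

/* Models "dep r,p,len,t": deposit the low len bits of r into t, into
 * the field whose rightmost bit sits at big-endian position p. */
static inline uint32_t model_dep(uint32_t r, unsigned p, unsigned len,
                                 uint32_t t)
{
        uint32_t mask = ((len < 32) ? ((1u << len) - 1) : ~0u) << (31 - p);
        return (t & ~mask) | ((r << (31 - p)) & mask);
}

Under this model, "extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index"
computes index = (va >> ASM_PMD_SHIFT) & ((1 << ASM_BITS_PER_PMD) - 1),
and "dep %r0,31,PAGE_SHIFT,\pmd" clears the low PAGE_SHIFT offset bits.
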
@@ -997,13 +971,6 @@ intr_restore:
 
        rfi
        nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
 
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt       intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
        LDREG   TASK_PT_GR31(%r1),%r31     /* restore syscall rp */
 
        /* NOTE: We use rsm/ssm pair to make this operation atomic */
+       LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
        rsm     PSW_SM_I, %r0
-       LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
-       mfsp    %sr3,%r1                           /* Get users space id */
+       copy    %r1,%r30                           /* Restore user sp */
+       mfsp    %sr3,%r1                           /* Get user space id */
        mtsp    %r1,%sr7                           /* Restore sr7 */
        ssm     PSW_SM_I, %r0
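
As the last hunk shows, the reordered sequence performs the LDREG of the
saved user sp while interrupts are still enabled and leaves only
register-to-register moves between rsm and ssm, presumably to keep the
masked window as short as possible. A C-style sketch of that pattern
(helper names are illustrative stand-ins, not kernel APIs):

#include <stdint.h>

extern void rsm_i(void);  /* stand-in for rsm PSW_SM_I,%r0: mask interrupts */
extern void ssm_i(void);  /* stand-in for ssm PSW_SM_I,%r0: unmask interrupts */

void restore_user_sp(const uintptr_t *task_pt_gr30, uintptr_t *sp,
                     uintptr_t *sr7, uintptr_t sr3)
{
        uintptr_t t = *task_pt_gr30;  /* memory load done before rsm */

        rsm_i();        /* window holds only register moves */
        *sp = t;        /* copy %r1,%r30: restore user sp */
        *sr7 = sr3;     /* mfsp %sr3 / mtsp %sr7: restore space id */
        ssm_i();        /* end of the atomic window */
}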