Merge master.kernel.org:/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog
[pandora-kernel.git] / arch/powerpc/mm/slb_low.S
index a3a03da..8548dcf 100644
@@ -1,6 +1,4 @@
 /*
- * arch/ppc64/mm/slb_low.S
- *
  * Low-level SLB routines
  *
  * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 
 #include <linux/config.h>
 #include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
 
-/* void slb_allocate(unsigned long ea);
+/* void slb_allocate_realmode(unsigned long ea);
  *
  * Create an SLB entry for the given EA (user or kernel).
  *     r3 = faulting address, r13 = PACA
  *     r9, r10, r11 are clobbered by this function
  * No other registers are examined or changed.
  */
-_GLOBAL(slb_allocate)
-       /*
-        * First find a slot, round robin. Previously we tried to find
-        * a free slot first but that took too long. Unfortunately we
-        * dont have any LRU information to help us choose a slot.
+_GLOBAL(slb_allocate_realmode)
+       /* r3 = faulting address */
+
+       srdi    r9,r3,60                /* get region */
+       srdi    r10,r3,28               /* get esid */
+       cmpldi  cr7,r9,0xc              /* cmp PAGE_OFFSET for later use */
+
+       /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
+       blt     cr7,0f                  /* user or kernel? */
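
The two shifts above split the effective address: the top nibble selects the region (0xc is the kernel linear mapping, 0xd vmalloc/ioremap, 0 user space) and the bits above SID_SHIFT (28) form the ESID. A minimal C sketch of the same arithmetic, with illustrative names:

    #include <stdint.h>

    /* Split an effective address the way the two srdi instructions do. */
    static inline uint64_t ea_region(uint64_t ea) { return ea >> 60; }
    static inline uint64_t ea_esid(uint64_t ea)   { return ea >> 28; }

    static int is_user_address(uint64_t ea)
    {
        return ea_region(ea) < 0xc;   /* blt cr7,0f: below the 0xc region */
    }
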
+
+       /* kernel address: proto-VSID = ESID */
+       /* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
+        * this code will generate the proto-VSID 0xfffffffff for the
+        * top segment.  That's ok, the scramble below will translate
+        * it to VSID 0, which is reserved as a bad VSID - one which
+        * will never have any pages in it.  */
+
+       /* Check whether we hit the kernel linear mapping or the
+        * vmalloc/ioremap region
+        */
+       bne     cr7,1f
+
+       /* Linear mapping encoding bits, the "li" instruction below will
+        * be patched by the kernel at boot
+        */
+_GLOBAL(slb_miss_kernel_load_linear)
+       li      r11,0
+       b       slb_finish_load
+
+1:     /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+        * will be patched by the kernel at boot
+        */
+BEGIN_FTR_SECTION
+       /* check whether this is in vmalloc or ioremap space */
+       clrldi  r11,r10,48
+       cmpldi  r11,(VMALLOC_SIZE >> 28) - 1
+       bgt     5f
+       lhz     r11,PACAVMALLOCSLLP(r13)
+       b       slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
+       li      r11,0
+       b       slb_finish_load
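
The `li r11,0` under slb_miss_kernel_load_linear and slb_miss_kernel_load_io are placeholders: at boot the kernel rewrites their 16-bit immediate with the page-size (SLLP) encoding chosen for the linear and ioremap mappings. A hedged sketch of such a patcher (the real one lives in slb.c; names here are illustrative):

    #include <stdint.h>

    /* "li rD,IMM" is "addi rD,0,IMM"; the immediate occupies the low
     * 16 bits of the instruction word, so patching is a masked store.
     */
    static void patch_slb_encoding(uint32_t *insn, uint16_t encoding)
    {
        *insn = (*insn & 0xffff0000u) | encoding;
        /* a real kernel would also flush the icache line for *insn */
    }
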
+
+
+0:     /* user address: proto-VSID = context << USER_ESID_BITS | ESID. First check
+        * if the address is within the boundaries of the user region
+        */
+       srdi.   r9,r10,USER_ESID_BITS
+       bne-    8f                      /* invalid ea bits set */
+
+       /* Figure out if the segment contains huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+BEGIN_FTR_SECTION
+       b       1f
+END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+       cmpldi  r10,16
+
+       lhz     r9,PACALOWHTLBAREAS(r13)
+       mr      r11,r10
+       blt     5f
+
+       lhz     r9,PACAHIGHHTLBAREAS(r13)
+       srdi    r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
+
+5:     srd     r9,r9,r11
+       andi.   r9,r9,1
+       beq     1f
+_GLOBAL(slb_miss_user_load_huge)
+       li      r11,0
+       b       2f
+1:
+#endif /* CONFIG_HUGETLB_PAGE */
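
The hugepage test reads one of two bitmaps out of the PACA: segments below 4GB (ESID < 16) are tracked per 256MB segment in the "low" field, higher addresses per HTLB area in the "high" field. A sketch of the bit test, with HTLB_AREA_SHIFT assumed for illustration:

    #include <stdint.h>

    #define SID_SHIFT       28
    #define HTLB_AREA_SHIFT 40   /* assumed value, for the sketch only */

    static int segment_is_huge(uint64_t esid, uint16_t low_areas,
                               uint16_t high_areas)
    {
        if (esid < 16)                        /* cmpldi r10,16 */
            return (low_areas >> esid) & 1;   /* srd r9,r9,r11 */
        return (high_areas >> (esid >> (HTLB_AREA_SHIFT - SID_SHIFT))) & 1;
    }
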
+
+       lhz     r11,PACACONTEXTSLLP(r13)
+2:
+       ld      r9,PACACONTEXTID(r13)
+       rldimi  r10,r9,USER_ESID_BITS,0
+       b       slb_finish_load
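
The rldimi merges context and ESID into the user proto-VSID; the earlier `srdi.` check already guaranteed the ESID fits in USER_ESID_BITS, so no masking is needed. Equivalent C, with the constant assumed:

    #include <stdint.h>

    #define USER_ESID_BITS 16   /* assumed value, for the sketch only */

    /* rldimi r10,r9,USER_ESID_BITS,0: context lands above the ESID bits */
    static uint64_t user_proto_vsid(uint64_t context, uint64_t esid)
    {
        return (context << USER_ESID_BITS) | esid;
    }
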
+
+8:     /* invalid EA */
+       li      r10,0                   /* BAD_VSID */
+       li      r11,SLB_VSID_USER       /* flags don't much matter */
+       b       slb_finish_load
+
+#ifdef __DISABLED__
+
+/* void slb_allocate_user(unsigned long ea);
+ *
+ * Create an SLB entry for the given EA (user or kernel).
+ *     r3 = faulting address, r13 = PACA
+ *     r9, r10, r11 are clobbered by this function
+ * No other registers are examined or changed.
+ *
+ * It is called with translation enabled in order to be able to walk the
+ * page tables. This is not currently used.
+ */
+_GLOBAL(slb_allocate_user)
+       /* r3 = faulting address */
+       srdi    r10,r3,28               /* get esid */
+
+       crset   4*cr7+lt                /* set "user" flag for later */
+
+       /* check if we fit in the range covered by the page tables */
+       srdi.   r9,r3,PGTABLE_EADDR_SIZE
+       crnot   4*cr0+eq,4*cr0+eq
+       beqlr
+
+       /* now we need to get to the page tables in order to get the page
+        * size encoding from the PMD. In the future, we'll be able to deal
+        * with 1T segments too by getting the encoding from the PGD instead
         */
+       ld      r9,PACAPGDIR(r13)
+       cmpldi  cr0,r9,0
+       beqlr
+       rlwinm  r11,r10,8,25,28
+       ldx     r9,r9,r11               /* get pgd_t */
+       cmpldi  cr0,r9,0
+       beqlr
+       rlwinm  r11,r10,3,17,28
+       ldx     r9,r9,r11               /* get pmd_t */
+       cmpldi  cr0,r9,0
+       beqlr
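
The disabled translation-on path walks two levels of page tables to pick up the page-size (LLP) bits from the PMD, giving up (beqlr) whenever a level is empty. A simplified model of that walk; the index arithmetic packed into the two rlwinm instructions is elided here:

    #include <stdint.h>

    static uint64_t walk_to_pmd(uint64_t *pgdir, unsigned pgd_idx,
                                unsigned pmd_idx)
    {
        uint64_t *pmd_page;

        if (!pgdir)
            return 0;                     /* no page tables yet */
        pmd_page = (uint64_t *)pgdir[pgd_idx];
        if (!pmd_page)
            return 0;
        return pmd_page[pmd_idx];         /* 0: let the fault path fill it */
    }
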
+
+       /* build vsid flags */
+       andi.   r11,r9,SLB_VSID_LLP
+       ori     r11,r11,SLB_VSID_USER
+
+       /* get context to calculate proto-VSID */
+       ld      r9,PACACONTEXTID(r13)
+       rldimi  r10,r9,USER_ESID_BITS,0
+
+       /* fall through slb_finish_load */
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * Finish loading of an SLB entry and return
+ *
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ */
+slb_finish_load:
+       ASM_VSID_SCRAMBLE(r10,r9)
+       rldimi  r11,r10,SLB_VSID_SHIFT,16       /* combine VSID and flags */
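
ASM_VSID_SCRAMBLE of this era multiplies the proto-VSID by a large prime modulo 2^36 - 1, which is also why the all-ones proto-VSID mentioned in the warning above lands on the reserved VSID 0: (2^36 - 1) * M mod (2^36 - 1) = 0. The rldimi then stacks the VSID above the flag bits. In C:

    #include <stdint.h>

    #define VSID_MULTIPLIER 200730139ull   /* the kernel's 28-bit prime */
    #define VSID_MODULUS    ((1ull << 36) - 1)
    #define SLB_VSID_SHIFT  12

    static uint64_t make_vsid_data(uint64_t proto_vsid, uint64_t flags)
    {
        uint64_t vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS;
        return (vsid << SLB_VSID_SHIFT) | flags;  /* rldimi r11,r10,12,16 */
    }
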
+
+       /* r3 = EA, r11 = VSID data */
+       /*
+        * Find a slot, round robin. Previously we tried to find a
+        * free slot first but that took too long. Unfortunately we
+        * don't have any LRU information to help us choose a slot.
+        */
 #ifdef CONFIG_PPC_ISERIES
        /*
         * On iSeries, the "bolted" stack segment can be cast out on
@@ -45,9 +191,9 @@ _GLOBAL(slb_allocate)
         */
        ld      r9,PACAKSAVE(r13)
        clrrdi  r9,r9,28
-       clrrdi  r11,r3,28
+       clrrdi  r3,r3,28
        li      r10,SLB_NUM_BOLTED-1    /* Stack goes in last bolted slot */
-       cmpld   r9,r11
+       cmpld   r9,r3
        beq     3f
 #endif /* CONFIG_PPC_ISERIES */
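
On iSeries the comparison reduces both the saved kernel stack pointer and the faulting EA to their 256MB segment base (clrrdi by 28 bits); a match forces the entry into the last bolted slot so the stack segment stays in a predictable place. Sketch, with SLB_NUM_BOLTED assumed:

    #include <stdint.h>

    #define SLB_NUM_BOLTED 3   /* assumed value, for the sketch only */

    static int pick_bolted_slot(uint64_t ksave, uint64_t ea, int *slot)
    {
        if ((ksave >> 28) == (ea >> 28)) {   /* clrrdi + cmpld */
            *slot = SLB_NUM_BOLTED - 1;      /* stack goes in last bolted slot */
            return 1;
        }
        return 0;
    }
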
 
@@ -61,63 +207,12 @@ _GLOBAL(slb_allocate)
 
 4:
        std     r10,PACASTABRR(r13)
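
The slot choice that ends here is a plain round-robin counter kept in the PACA: advance stab_rr, and when it runs past the end of the SLB wrap back to the first non-bolted entry. A sketch under assumed constants:

    #include <stdint.h>

    #define SLB_NUM_ENTRIES 64   /* assumed value, for the sketch only */
    #define SLB_NUM_BOLTED  3

    static int next_slb_slot(uint64_t *stab_rr)
    {
        uint64_t slot = *stab_rr + 1;

        if (slot >= SLB_NUM_ENTRIES)
            slot = SLB_NUM_BOLTED;   /* never evict a bolted entry */
        *stab_rr = slot;             /* std r10,PACASTABRR(r13) */
        return (int)slot;
    }
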
-3:
-       /* r3 = faulting address, r10 = entry */
-
-       srdi    r9,r3,60                /* get region */
-       srdi    r3,r3,28                /* get esid */
-       cmpldi  cr7,r9,0xc              /* cmp KERNELBASE for later use */
 
-       rldimi  r10,r3,28,0             /* r10= ESID<<28 | entry */
-       oris    r10,r10,SLB_ESID_V@h    /* r10 |= SLB_ESID_V */
-
-       /* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
-
-       blt     cr7,0f                  /* user or kernel? */
-
-       /* kernel address: proto-VSID = ESID */
-       /* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
-        * this code will generate the protoVSID 0xfffffffff for the
-        * top segment.  That's ok, the scramble below will translate
-        * it to VSID 0, which is reserved as a bad VSID - one which
-        * will never have any pages in it.  */
-       li      r11,SLB_VSID_KERNEL
-BEGIN_FTR_SECTION
-       bne     cr7,9f
-       li      r11,(SLB_VSID_KERNEL|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-       b       9f
-
-0:     /* user address: proto-VSID = context<<15 | ESID */
-       srdi.   r9,r3,USER_ESID_BITS
-       bne-    8f                      /* invalid ea bits set */
-
-#ifdef CONFIG_HUGETLB_PAGE
-BEGIN_FTR_SECTION
-       lhz     r9,PACAHIGHHTLBAREAS(r13)
-       srdi    r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
-       srd     r9,r9,r11
-       lhz     r11,PACALOWHTLBAREAS(r13)
-       srd     r11,r11,r3
-       or      r9,r9,r11
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-#endif /* CONFIG_HUGETLB_PAGE */
-
-       li      r11,SLB_VSID_USER
-
-#ifdef CONFIG_HUGETLB_PAGE
-BEGIN_FTR_SECTION
-       rldimi  r11,r9,8,55             /* shift masked bit into SLB_VSID_L */
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-#endif /* CONFIG_HUGETLB_PAGE */
-
-       ld      r9,PACACONTEXTID(r13)
-       rldimi  r3,r9,USER_ESID_BITS,0
-
-9:     /* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
-       ASM_VSID_SCRAMBLE(r3,r9)
+3:
+       rldimi  r3,r10,0,36             /* r3 = EA[0:35] | entry */
+       oris    r10,r3,SLB_ESID_V@h     /* r10 = r3 | SLB_ESID_V */
 
-       rldimi  r11,r3,SLB_VSID_SHIFT,16        /* combine VSID and flags */
+       /* r3 = ESID data, r11 = VSID data */
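
The ESID word handed to slbmte is the segment base of the EA with the slot index in its low bits, plus the valid bit (SLB_ESID_V, the architected 0x08000000). In C:

    #include <stdint.h>

    #define SLB_ESID_V 0x0000000008000000ull   /* entry-valid bit */

    static uint64_t make_esid_data(uint64_t ea, unsigned slot)
    {
        /* rldimi r3,r10,0,36 then oris r10,r3,SLB_ESID_V@h */
        return (ea & ~0xfffffffull) | slot | SLB_ESID_V;
    }
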
 
        /*
         * No need for an isync before or after this slbmte. The exception
@@ -125,7 +220,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
         */
        slbmte  r11,r10
 
-       bgelr   cr7                     /* we're done for kernel addresses */
+       /* we're done for kernel addresses */
+       crclr   4*cr0+eq                /* set result to "success" */
+       bgelr   cr7
 
        /* Update the slb cache */
        lhz     r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
@@ -143,9 +240,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
        li      r3,SLB_CACHE_ENTRIES+1
 2:
        sth     r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
+       crclr   4*cr0+eq                /* set result to "success" */
        blr
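
The cache updated above lets the context-switch path invalidate only the user segments actually touched; once it overflows, the pointer is pinned at SLB_CACHE_ENTRIES + 1 so the switch code knows to flush the whole SLB instead. A sketch with the entry count assumed:

    #include <stdint.h>

    #define SLB_CACHE_ENTRIES 8   /* assumed value, for the sketch only */

    static void slb_cache_insert(uint16_t *ptr, uint16_t *cache, uint64_t esid)
    {
        if (*ptr < SLB_CACHE_ENTRIES) {
            cache[(*ptr)++] = (uint16_t)esid;  /* low ESID bits, as sth stores */
        } else {
            *ptr = SLB_CACHE_ENTRIES + 1;      /* mark "overflowed" */
        }
    }
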
 
-8:     /* invalid EA */
-       li      r3,0                    /* BAD_VSID */
-       li      r11,SLB_VSID_USER       /* flags don't much matter */
-       b       9b