Merge branch 'merge'
[pandora-kernel.git] / arch / powerpc / mm / slb.c
index 0473953..d373391 100644 (file)
  *      2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
+#undef DEBUG
+
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
 #include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <linux/compiler.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
 
-extern void slb_allocate(unsigned long ea);
+extern void slb_allocate_realmode(unsigned long ea);
+extern void slb_allocate_user(unsigned long ea);
+
+/*
+ * Single entry point for allocating an SLB entry for @ea from C code.
+ * Today it unconditionally dispatches to the real-mode handler; the
+ * separate user-mode path (slb_allocate_user) is declared above but
+ * unused — see the comment below.
+ */
+static void slb_allocate(unsigned long ea)
+{
+       /* Currently, we do real mode for all SLBs including user, but
+        * that will change if we bring back dynamic VSIDs
+        */
+       slb_allocate_realmode(ea);
+}
 
 static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
 {
@@ -33,31 +52,60 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void create_slbe(unsigned long ea, unsigned long flags,
-                              unsigned long entry)
+/*
+ * Update one slot of the SLB shadow buffer (the save area the
+ * hypervisor reads to restore bolted SLB entries — see the PHYP
+ * preemption comment in create_shadowed_slbe()).
+ *
+ * Ordering matters: the esid is zeroed first and rewritten last, with
+ * barrier()s in between, so the entry is never observed as valid while
+ * its vsid half is stale or half-written.
+ */
+static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+                                    unsigned long entry)
+{
+       /*
+        * Clear the ESID first so the entry is not valid while we are
+        * updating it.
+        */
+       get_slb_shadow()->save_area[entry].esid = 0;
+       barrier();
+       get_slb_shadow()->save_area[entry].vsid = vsid;
+       barrier();
+       get_slb_shadow()->save_area[entry].esid = esid;
+
+}
+
+/*
+ * Install a bolted SLB entry for @ea in slot @entry: mirror it into the
+ * shadow buffer first, then write the real SLB with slbmte.  The esid
+ * and vsid words are built by mk_esid_data()/mk_vsid_data() from @ea,
+ * @flags and the slot number.
+ */
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+                                       unsigned long entry)
+{
+       /*
+        * Updating the shadow buffer before writing the SLB ensures
+        * we don't get a stale entry here if we get preempted by PHYP
+        * between these two statements.
+        */
+       slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
+                         entry);
+
        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
 }
 
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
 {
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
-       unsigned long ksp_flags = SLB_VSID_KERNEL;
+       unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data;
 
        WARN_ON(!irqs_disabled());
 
-       if (cpu_has_feature(CPU_FTR_16M_PAGE))
-               ksp_flags |= SLB_VSID_L;
+       linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+       vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+       lflags = SLB_VSID_KERNEL | linear_llp;
+       vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-       if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+       if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;
 
+       /* Only third entry (stack) may change here so only resave that */
+       slb_shadow_update(ksp_esid_data,
+                         mk_vsid_data(ksp_esid_data, lflags), 2);
+
        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
@@ -67,9 +115,9 @@ static void slb_flush_and_rebolt(void)
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
-                    :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
-                       "r"(mk_esid_data(VMALLOCBASE, 1)),
-                       "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
+                    :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+                       "r"(mk_esid_data(VMALLOC_START, 1)),
+                       "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
 }
@@ -111,14 +159,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-       if (pc >= KERNELBASE)
+       if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);
 
        if (GET_ESID(pc) == GET_ESID(stack))
                return;
 
-       if (stack >= KERNELBASE)
+       if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);
 
@@ -126,33 +174,81 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;
 
-       if (unmapped_base >= KERNELBASE)
+       if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
 }
 
+/*
+ * Patch a single SLB-miss-handler instruction in place with a
+ * runtime-determined immediate (the VSID flags for the page size
+ * detected at boot), then flush the icache over those 4 bytes so the
+ * patched instruction is what gets fetched.
+ *
+ * NOTE(review): this only works because the instruction is assembled
+ * with a zero immediate field — the new value is OR-ed in, never
+ * masked, so re-patching with a different value would be wrong (the
+ * slb_encoding_inited guard in slb_initialize() prevents that).
+ */
+static inline void patch_slb_encoding(unsigned int *insn_addr,
+                                     unsigned int immed)
+{
+       /* Assume the instruction had a "0" immediate value, just
+        * "or" in the new value
+        */
+       *insn_addr |= immed;
+       flush_icache_range((unsigned long)insn_addr, 4+
+                          (unsigned long)insn_addr);
+}
+
+/*
+ * Boot-time SLB setup.  First (once per boot, guarded by
+ * slb_encoding_inited) patch the SLB miss handlers with the page-size
+ * encodings from mmu_psize_defs and cache the vmalloc flags in the
+ * paca.  Then, except on iSeries — where the hypervisor has already
+ * bolted the entries from the lparMap data in head.S — invalidate the
+ * whole SLB and recreate the bolted linear (slot 0) and vmalloc
+ * (slot 1) entries via create_shadowed_slbe(), leaving stab_rr pointing
+ * past the bolted slots for round-robin allocation.
+ */
 void slb_initialize(void)
 {
+       unsigned long linear_llp, vmalloc_llp, io_llp;
+       static int slb_encoding_inited;
+       extern unsigned int *slb_miss_kernel_load_linear;
+       extern unsigned int *slb_miss_kernel_load_io;
+#ifdef CONFIG_HUGETLB_PAGE
+       extern unsigned int *slb_miss_user_load_huge;
+       unsigned long huge_llp;
+
+       huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
+#endif
+
+       /* Prepare our SLB miss handler based on our page size */
+       linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+       io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+       vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+       get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
+       if (!slb_encoding_inited) {
+               slb_encoding_inited = 1;
+               patch_slb_encoding(slb_miss_kernel_load_linear,
+                                  SLB_VSID_KERNEL | linear_llp);
+               patch_slb_encoding(slb_miss_kernel_load_io,
+                                  SLB_VSID_KERNEL | io_llp);
+
+               DBG("SLB: linear  LLP = %04x\n", linear_llp);
+               DBG("SLB: io      LLP = %04x\n", io_llp);
+#ifdef CONFIG_HUGETLB_PAGE
+               patch_slb_encoding(slb_miss_user_load_huge,
+                                  SLB_VSID_USER | huge_llp);
+               DBG("SLB: huge    LLP = %04x\n", huge_llp);
+#endif
+       }
+
        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
 #ifndef CONFIG_PPC_ISERIES
-       unsigned long flags = SLB_VSID_KERNEL;
+ {
+       /* Brace block scopes lflags/vflags, which now follow the
+        * statements patched in above (pre-C99 declaration rules). */
+       unsigned long lflags, vflags;
 
-       /* Invalidate the entire SLB (even slot 0) & all the ERATS */
-       if (cpu_has_feature(CPU_FTR_16M_PAGE))
-               flags |= SLB_VSID_L;
+       lflags = SLB_VSID_KERNEL | linear_llp;
+       vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-       asm volatile("isync":::"memory");
-       asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+       /* Invalidate the entire SLB (even slot 0) & all the ERATS */
+       asm volatile("isync":::"memory");
+       asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
-       create_slbe(KERNELBASE, flags, 0);
-       create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
+       create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+
+       create_shadowed_slbe(VMALLOC_START, vflags, 1);
+
        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment.  By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
-#endif
+ }
+#endif /* CONFIG_PPC_ISERIES */
 
        get_paca()->stab_rr = SLB_NUM_BOLTED;
 }