[POWERPC] spufs: set up correct SLB entries for 64k pages
author arnd@arndb.de <arnd@arndb.de>
Mon, 19 Jun 2006 18:33:23 +0000 (20:33 +0200)
committer Paul Mackerras <paulus@samba.org>
Wed, 21 Jun 2006 05:01:30 +0000 (15:01 +1000)
spufs currently knows only 4k pages and 16M hugetlb
pages. Make it use the regular methods for deciding on
the SLB bits.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spufs/switch.c

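The point of the change is that mmu_psize_defs[] already records, for each supported page size, the SLB "L" and "LP" bits that select it (the sllp field), so callers no longer need to hard-code SLB_VSID_L. Below is a minimal user-space model of that encoding; the bit values mirror asm/mmu.h of this era, but model_sllp[] and the sample VSID are illustrative stand-ins, not kernel code:

/*
 * Model of how a user SLB VSID word is assembled from the sllp bits.
 * Not kernel code: model_sllp[] stands in for mmu_psize_defs[].
 */
#include <stdio.h>
#include <stdint.h>

#define SLB_VSID_SHIFT	12
#define SLB_VSID_KS	0x0000000000000800ULL
#define SLB_VSID_KP	0x0000000000000400ULL
#define SLB_VSID_L	0x0000000000000100ULL	/* large page */
#define SLB_VSID_C	0x0000000000000080ULL	/* class bit */
#define SLB_VSID_LP_01	0x0000000000000010ULL	/* large-page size selector */
#define SLB_VSID_USER	(SLB_VSID_KP | SLB_VSID_KS | SLB_VSID_C)

enum model_psize { MODEL_4K, MODEL_64K, MODEL_16M };

/* sllp: the L and LP bits that select each base page size */
static const uint64_t model_sllp[] = {
	[MODEL_4K]  = 0,
	[MODEL_64K] = SLB_VSID_L | SLB_VSID_LP_01,
	[MODEL_16M] = SLB_VSID_L,
};

/* same shape as the vsid computation in __spu_trap_data_seg() below */
static uint64_t make_user_vsid(uint64_t vsid, enum model_psize psize)
{
	return (vsid << SLB_VSID_SHIFT) | SLB_VSID_USER | model_sllp[psize];
}

int main(void)
{
	uint64_t vsid = 0x123456;	/* made-up VSID, for illustration */

	printf("4k:  %016llx\n", (unsigned long long)make_user_vsid(vsid, MODEL_4K));
	printf("64k: %016llx\n", (unsigned long long)make_user_vsid(vsid, MODEL_64K));
	printf("16M: %016llx\n", (unsigned long long)make_user_vsid(vsid, MODEL_16M));
	return 0;
}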
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d5877aa..fd6ea57 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -71,7 +71,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
-       u64 esid, vsid;
+       u64 esid, vsid, llp;
 
        pr_debug("%s\n", __FUNCTION__);
 
@@ -91,9 +91,14 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
        }
 
        esid = (ea & ESID_MASK) | SLB_ESID_V;
-       vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
+#ifdef CONFIG_HUGETLB_PAGE
        if (in_hugepage_area(mm->context, ea))
-               vsid |= SLB_VSID_L;
+               llp = mmu_psize_defs[mmu_huge_psize].sllp;
+       else
+#endif
+               llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+       vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+                       SLB_VSID_USER | llp;
 
        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid);
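Two details are worth noting in this hunk. The CONFIG_HUGETLB_PAGE guard is there because mmu_huge_psize is only defined when hugetlb support is built in; with it compiled out, the lookup falls through to mmu_virtual_psize unconditionally. And the sllp lookup fixes an encoding the old code could never produce: the old path could only OR in SLB_VSID_L, but a 64k base-page segment needs SLB_VSID_L together with an LP selector bit, which mmu_psize_defs[mmu_virtual_psize].sllp supplies automatically.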
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 60f8b36..97a0e80 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -718,13 +718,15 @@ static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
 
 static inline void get_kernel_slb(u64 ea, u64 slb[2])
 {
-       slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+       u64 llp;
+
+       if (REGION_ID(ea) == KERNEL_REGION_ID)
+               llp = mmu_psize_defs[mmu_linear_psize].sllp;
+       else
+               llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+       slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+               SLB_VSID_KERNEL | llp;
        slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
-
-       /* Large pages are used for kernel text/data, but not vmalloc.  */
-       if (cpu_has_feature(CPU_FTR_16M_PAGE)
-           && REGION_ID(ea) == KERNEL_REGION_ID)
-               slb[0] |= SLB_VSID_L;
 }
 
 static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
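In get_kernel_slb(), the page size now follows the region of the effective address: the kernel linear mapping uses mmu_linear_psize, which the hash MMU setup code makes 16M when CPU_FTR_16M_PAGE is available (subsuming the old feature check), while vmalloc space uses the base page size mmu_virtual_psize. A stand-alone sketch of that region check, assuming a 16M linear mapping and 64k base pages; the REGION_ID definitions match the ppc64 headers of the time, but the psize values are again illustrative:

/*
 * Sketch of the region-based llp selection in get_kernel_slb().
 * Not kernel code: the two sllp constants stand in for
 * mmu_psize_defs[mmu_linear_psize] and [mmu_virtual_psize].
 */
#include <stdio.h>
#include <stdint.h>

#define REGION_SHIFT		60
#define REGION_ID(ea)		((uint64_t)(ea) >> REGION_SHIFT)
#define KERNEL_REGION_ID	0xcULL	/* linear mapping */
#define VMALLOC_REGION_ID	0xdULL	/* vmalloc/ioremap space */

#define SLB_VSID_L		0x100ULL
#define SLB_VSID_LP_01		0x010ULL

/* assumed configuration: 16M linear mapping, 64k base pages */
static const uint64_t linear_sllp  = SLB_VSID_L;		  /* 16M */
static const uint64_t virtual_sllp = SLB_VSID_L | SLB_VSID_LP_01; /* 64k */

static uint64_t kernel_llp(uint64_t ea)
{
	/* Only the linear mapping uses the large linear page size;
	 * vmalloc space is mapped with the base page size. */
	if (REGION_ID(ea) == KERNEL_REGION_ID)
		return linear_sllp;
	return virtual_sllp;
}

int main(void)
{
	printf("linear:  llp=%03llx\n",
	       (unsigned long long)kernel_llp(0xc000000000000000ULL));
	printf("vmalloc: llp=%03llx\n",
	       (unsigned long long)kernel_llp(0xd000000000000000ULL));
	return 0;
}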