#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
+#include <asm/udbg.h>
#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
#else
-#define DBG(fmt...)
+#define DBG pr_debug
#endif
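+/*
+ * Note: with DEBUG set, DBG() prints unconditionally via printk();
+ * otherwise it falls back to pr_debug(), which compiles away unless
+ * dynamic debug is enabled.
+ */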
extern void slb_allocate_realmode(unsigned long ea);
slb_allocate_realmode(ea);
}
+#define slb_esid_mask(ssize) \
+ (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
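+/*
+ * (The mask keeps the bits above the segment boundary: above
+ *  SID_SHIFT (28) for 256M segments, above SID_SHIFT_1T (40) for
+ *  1T segments.)
+ */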
+
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
unsigned long slot)
{
- unsigned long mask;
-
- mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
- return (ea & mask) | SLB_ESID_V | slot;
+ return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}
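+/*
+ * e.g. mk_esid_data(0xc000000000000000UL, MMU_SEGSIZE_256M, 2) yields
+ * the 256M-masked EA with SLB_ESID_V set and slot index 2 in the low
+ * bits, ready to be used as the RB operand of slbmte.
+ */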
#define slb_vsid_shift(ssize) \
ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
}
+ /*
+ * We can't take a PMU exception in the following code, so hard
+ * disable interrupts.
+ */
+ hard_irq_disable();
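+ /* (hard_irq_disable() clears MSR:EE rather than just soft-disabling
+  *  in the paca, so a PMU exception really cannot occur here.) */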
+
/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
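+ /* (Otherwise a stack access could fault the entry back in through
+  *  the SLB miss handler, and rebolting it afterwards would leave a
+  *  duplicate SLB entry for the same ESID.) */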
asm volatile("isync\n"
slb_flush_and_rebolt();
}
+/* Helper function to compare esids. There are four cases to handle.
+ * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
+ * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
+ * 3. The system is 1T capable, only one of the two addresses is >= 1T. This is not a match.
+ * 4. The system is 1T capable, both addresses are >= 1T, use the GET_ESID_1T macro to compare.
+ */
+static inline int esids_match(unsigned long addr1, unsigned long addr2)
+{
+ int esid_1t_count;
+
+ /* System is not 1T segment size capable. */
+ if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+ return (GET_ESID(addr1) == GET_ESID(addr2));
+
+ esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
+ ((addr2 >> SID_SHIFT_1T) != 0));
+
+ /* both addresses are < 1T */
+ if (esid_1t_count == 0)
+ return (GET_ESID(addr1) == GET_ESID(addr2));
+
+ /* One address < 1T, the other >= 1T.  Not a match */
+ if (esid_1t_count == 1)
+ return 0;
+
+ /* Both addresses are >= 1T. */
+ return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
+}
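+/*
+ * Example: with the 1T boundary at 1UL << SID_SHIFT_1T, the addresses
+ * 0xffffffffff and 0x10000000000 land on opposite sides of it, giving
+ * esid_1t_count == 1 (case 3 above), so they can never match.
+ */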
+
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
unsigned long stack = KSTK_ESP(tsk);
unsigned long unmapped_base;
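+ /*
+  * CPUs with CPU_FTR_NO_SLBIE_B (e.g. PA6T) can't slbie 1T-segment
+  * entries one at a time, so they skip the slbie loop below and
+  * always fall through to the full SLB flush.
+  */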
- if (offset <= SLB_CACHE_ENTRIES) {
+ if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+ offset <= SLB_CACHE_ENTRIES) {
int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
return;
slb_allocate(pc);
- if (GET_ESID(pc) == GET_ESID(stack))
+ if (esids_match(pc, stack))
return;
if (is_kernel_addr(stack))
return;
slb_allocate(stack);
- if ((GET_ESID(pc) == GET_ESID(unmapped_base))
- || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+ if (esids_match(pc, unmapped_base) ||
+     esids_match(stack, unmapped_base))
return;
if (is_kernel_addr(unmapped_base))
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
+ extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ extern unsigned int *slb_miss_kernel_load_vmemmap;
+ unsigned long vmemmap_llp;
+#endif
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
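+ /*
+  * The vmemmap region gets its own load in the SLB miss handler,
+  * patched below with the vmemmap page size encoding, just like
+  * the linear and io regions.
+  */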
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
SLB_VSID_KERNEL | linear_llp);
patch_slb_encoding(slb_miss_kernel_load_io,
SLB_VSID_KERNEL | io_llp);
+ patch_slb_encoding(slb_compare_rr_to_size,
+ mmu_slb_size);
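+ /*
+  * mmu_slb_size comes from the device tree; patching it in keeps
+  * the miss handler's round-robin slot counter within the number
+  * of SLB entries this CPU actually has.
+  */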
+
+ DBG("SLB: linear LLP = %04lx\n", linear_llp);
+ DBG("SLB: io LLP = %04lx\n", io_llp);
- DBG("SLB: linear LLP = %04x\n", linear_llp);
- DBG("SLB: io LLP = %04x\n", io_llp);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+ SLB_VSID_KERNEL | vmemmap_llp);
+ DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
}
get_paca()->stab_rr = SLB_NUM_BOLTED;
create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
- /* We don't bolt the stack for the time being - we're in boot,
- * so the stack is in the bolted segment. By the time it goes
- * elsewhere, we'll call _switch() which will bolt in the new
- * one. */
+ /* For the boot cpu, we're running on the stack in init_thread_union,
+ * which is in the first segment of the linear mapping, and also
+ * get_paca()->kstack hasn't been initialized yet.
+ * For secondary cpus, we need to bolt the kernel stack entry now.
+ */
+ slb_shadow_clear(2);
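+ /* (Clearing the shadow first means the hypervisor never sees a
+  *  stale kernel stack entry if we don't rebolt one below.) */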
+ if (raw_smp_processor_id() != boot_cpuid &&
+ (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+ create_shadowed_slbe(get_paca()->kstack,
+ mmu_kernel_ssize, lflags, 2);
+
asm volatile("isync":::"memory");
}