#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__
-#include <linux/gfp.h>
+#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
+#include <asm/sections.h>
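+/* asm/sections.h provides _ramend, used by protect_page() below */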
/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
-static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *tsk)
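+/*
+ * Raw MM switch; callers go through the switch_mm() wrappers below,
+ * which add hardware IRQ protection when CONFIG_IPIPE is enabled.
+ */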
+static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+ struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif
}
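+/*
+ * With CONFIG_IPIPE, a higher-priority I-pipe domain may preempt the
+ * kernel even where vanilla Linux has interrupts masked, so switching
+ * the MM context must be guarded by disabling interrupts at the
+ * hardware level.  Without CONFIG_IPIPE the guards expand to no-ops.
+ */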
+#ifdef CONFIG_IPIPE
+#define lock_mm_switch(flags) local_irq_save_hw_cond(flags)
+#define unlock_mm_switch(flags) local_irq_restore_hw_cond(flags)
+#else
+#define lock_mm_switch(flags) do { (void)(flags); } while (0)
+#define unlock_mm_switch(flags) do { (void)(flags); } while (0)
+#endif /* CONFIG_IPIPE */
+
#ifdef CONFIG_MPU
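+/*
+ * With the MPU enabled, __switch_mm() rewrites the per-CPU CPLB state,
+ * so take the hardware IRQ guard defined above around the raw switch.
+ */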
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ lock_mm_switch(flags);
+ __switch_mm(prev, next, tsk);
+ unlock_mm_switch(flags);
+}
+
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
unsigned long flags)
{
unsigned long *mask = mm->context.page_rwx_mask;
- unsigned long page = addr >> 12;
- unsigned long idx = page >> 5;
- unsigned long bit = 1 << (page & 31);
+ unsigned long page;
+ unsigned long idx;
+ unsigned long bit;
+
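+	/*
+	 * The rwx bitmap covers the SDRAM pages and is followed by slots
+	 * for the async banks, so fold an async address down to the
+	 * entries right after the last SDRAM page.
+	 */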
+ if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
+ page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
+ else
+ page = addr >> 12;
+ idx = page >> 5;
+ bit = 1 << (page & 31);
	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}

static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
+#else /* !CONFIG_MPU */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ __switch_mm(prev, next, tsk);
+}
#endif
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
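+/*
+ * Entry points for I-pipe aware callers that need to guard an MM switch
+ * against domain preemption; they mirror lock_mm_switch() and
+ * unlock_mm_switch() above.
+ */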
+#define ipipe_mm_switch_protect(flags) \
+ local_irq_save_hw_cond(flags)
+
+#define ipipe_mm_switch_unprotect(flags) \
+ local_irq_restore_hw_cond(flags)
+
#endif /* __BLACKFIN_MMU_CONTEXT_H__ */