static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
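Aside: CTX_MAP_SIZE sizes the context bitmap in bytes, rounded up to whole
longs. A worked example, assuming a hypothetical part with an 8-bit
context-ID space and 64-bit longs; the numbers are illustrative only:

/* Not part of the patch: with last_context = 255 and BITS_PER_LONG = 64,
 *
 *	CTX_MAP_SIZE = sizeof(unsigned long) * (255 / 64 + 1)
 *	             = 8 * 4 = 32 bytes,
 *
 * i.e. 256 bits, one per context ID from 0 up to last_context.
 */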
@@ ... @@ static unsigned int steal_context_smp(unsigned int id)
struct mm_struct *mm;
unsigned int cpu, max;
- again:
max = last_context - first_context;
/* Attempt to free next_context first and then loop until we manage */
@@ ... @@
if (mm->context.active) {
id++;
if (id > last_context)
id = first_context;
continue;
}
- pr_debug("[%d] steal context %d from mm @%p\n",
+ pr_devel("[%d] steal context %d from mm @%p\n",
smp_processor_id(), id, mm);
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
@@ ... @@
/* This will happen if you have more CPUs than available contexts,
* all we can do here is wait a bit and try again
*/
spin_unlock(&context_lock);
cpu_relax();
spin_lock(&context_lock);
- goto again;
+
+ /* This will cause the caller to try again */
+ return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */
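Note: the unlock/cpu_relax()/lock sequence above opens a window in which
other CPUs can free, steal or assign contexts, so anything this function
computed beforehand is stale once the lock is re-taken. Returning
MMU_NO_CONTEXT hands the retry back to the caller, which re-evaluates under
the lock; the matching "again:" label is added in switch_mmu_context() below.
A minimal sketch of the pattern, with illustrative names that are not from
the kernel:

/* Sketch: a helper that may drop the lock reports failure instead of
 * looping internally, because the caller's preconditions may no longer
 * hold once the lock has been re-acquired.
 */
static DEFINE_SPINLOCK(table_lock);		/* illustrative lock */

static int try_take_slot(void)
{
	int slot = scan_for_free_slot();	/* assumed helper */
	if (slot >= 0)
		return slot;		/* found while holding the lock */

	spin_unlock(&table_lock);	/* let current users make progress */
	cpu_relax();
	spin_lock(&table_lock);
	return -1;			/* caller must re-check everything */
}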
@@ ... @@ static unsigned int steal_context_up(unsigned int id)
/* Pick up the victim mm */
mm = context_mm[id];
- pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+ pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
/* Mark this mm as being stolen */
mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
__clear_bit(id, stale_map[cpu]);
return id;
}
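The resulting contract for the two helpers, summarized:

/* Editor's summary, not patch text:
 *
 *	steal_context_smp(id)	returns a usable id, or MMU_NO_CONTEXT
 *				after having dropped and re-taken
 *				context_lock; the caller must then retry
 *
 *	steal_context_up(id)	single CPU, no unlock window and no
 *				remote TLBs to chase: always succeeds,
 *				never returns MMU_NO_CONTEXT
 */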
@@ ... @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
spin_lock(&context_lock);
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
cpu, next, next->context.active, next->context.id);
#endif
#ifdef CONFIG_SMP
/* Mark us active and the previous one not anymore */
next->context.active++;
if (prev) {
#ifndef DEBUG_STEAL_ONLY
- pr_debug(" old context %p active was: %d\n",
+ pr_devel(" old context %p active was: %d\n",
prev, prev->context.active);
#endif
WARN_ON(prev->context.active < 1);
prev->context.active--;
}
+
+ again:
#endif /* CONFIG_SMP */
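Note: the "again:" label is deliberately placed this early, before the
free-context test below, rather than at the steal call itself:

/* Editor's summary of what a retry from "again:" re-evaluates:
 *   next->context.id	while steal_context_smp() had the lock dropped,
 *			another CPU running this same mm may already have
 *			assigned it an id, making allocation unnecessary
 *   nr_free_contexts	another CPU may have freed a context meanwhile,
 *			making stealing unnecessary
 * Only after both re-checks does the steal path run again.
 */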
/* If we already have a valid assigned context, skip all that */
@@ ... @@
/* No more free contexts, let's try to steal one */
if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
- goto stolen;
+ if (id == MMU_NO_CONTEXT)
+ goto again;
}
#endif /* CONFIG_SMP */
id = steal_context_up(id);
goto stolen;
}
@@ ... @@
stolen:
next_context = id + 1;
context_mm[id] = next;
next->context.id = id;
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ pr_devel("[%d] picked up new id %d, nrf is now %d\n",
cpu, id, nr_free_contexts);
#endif
@@ ... @@
ctxt_ok:
/* If that context got marked stale on this CPU, then flush the
* local TLB for it and unmark it before we use it
*/
if (test_bit(id, stale_map[cpu])) {
- pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ pr_devel("[%d] flushing stale context %d for mm @%p !\n",
cpu, id, next);
local_flush_tlb_mm(next);
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
__clear_bit(id, stale_map[cpu]);
}
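Note: this is the consumer half of the stale-map protocol; the producer half
lives in steal_context_smp(), in a hunk not shown here. Paraphrased from the
same file:

/* Paraphrase of the producer side: when an id is stolen, it is marked
 * stale on every CPU that ever ran the victim mm:
 *
 *	for_each_cpu(cpu, mm_cpumask(mm))
 *		__set_bit(id, stale_map[cpu]);
 *
 * Each such CPU then pays for the TLB flush lazily, right here in
 * switch_mmu_context(), the next time it picks up that id.
 */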
@@ ... @@
*/
void destroy_context(struct mm_struct *mm)
{
+ unsigned long flags;
unsigned int id;
if (mm->context.id == MMU_NO_CONTEXT)
return;
WARN_ON(mm->context.active != 0);
- spin_lock(&context_lock);
+ spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
mm->context.active = 0;
- context_mm[id] = NULL;
#endif
+ context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock(&context_lock);
+ spin_unlock_irqrestore(&context_lock, flags);
}
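Note: the irqsave conversion presumably guards against context_lock being
taken on a path where local interrupts matter; the classic self-deadlock it
rules out looks like this (illustrative scenario only):

/* CPU0 task:	spin_lock(&context_lock);	IRQs still enabled
 * CPU0 irq:	-> some path that also takes context_lock
 *		spin_lock(&context_lock);	spins forever, because the
 *		holder is the very code this interrupt preempted
 *
 * spin_lock_irqsave() masks local interrupts for the critical section and
 * spin_unlock_irqrestore() puts the saved state back, which also keeps
 * callers that already run with IRQs off correct.
 */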
@@ ... @@
#ifdef CONFIG_SMP
static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
/* We don't touch CPU 0 map, it's allocated at boot time */
if (cpu == 0)
return NOTIFY_OK;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
break;
#endif /* CONFIG_HOTPLUG_CPU */
}
return NOTIFY_OK;
}
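For completeness, a sketch of how such a callback is wired up with the
CPU-hotplug notifier API of this era. register_cpu_notifier() and NOTIFY_OK
are the real interfaces; the notifier_block name and the init-path placement
are illustrative:

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

/* e.g. from the subsystem init path, once the boot CPU's map exists: */
	register_cpu_notifier(&mmu_context_cpu_nb);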