Merge branch 'master' of ssh://master.kernel.org/home/ftp/pub/scm/linux/kernel/git...
author David S. Miller <davem@davemloft.net>
Sun, 29 Mar 2009 22:44:22 +0000 (15:44 -0700)
committer David S. Miller <davem@davemloft.net>
Sun, 29 Mar 2009 22:44:22 +0000 (15:44 -0700)
Conflicts:
arch/sparc/kernel/smp_64.c

arch/sparc/kernel/irq_64.c
arch/sparc/kernel/smp_64.c

diff --combined arch/sparc/kernel/irq_64.c
@@@ -185,7 -185,7 +185,7 @@@ int show_interrupts(struct seq_file *p
                seq_printf(p, "%10u ", kstat_irqs(i));
  #else
                for_each_online_cpu(j)
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
  #endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);
@@@ -252,10 -252,9 +252,10 @@@ struct irq_handler_data 
  #ifdef CONFIG_SMP
  static int irq_choose_cpu(unsigned int virt_irq)
  {
 -      cpumask_t mask = irq_desc[virt_irq].affinity;
 +      cpumask_t mask;
        int cpuid;
  
 +      cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                spin_lock_irqsave(&irq_rover_lock, flags);
  
                while (!cpu_online(irq_rover)) {
-                       if (++irq_rover >= NR_CPUS)
+                       if (++irq_rover >= nr_cpu_ids)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
-                       if (++irq_rover >= NR_CPUS)
+                       if (++irq_rover >= nr_cpu_ids)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));
  
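For reference, a minimal sketch of the cpumask pattern the resolution above settles on (illustrative only; example_pick_cpu and its selection policy are hypothetical and assume the 2.6.29-era <linux/cpumask.h> API): the affinity mask is copied through a pointer with cpumask_copy(), and CPU ids are bounded by the run-time nr_cpu_ids rather than the compile-time NR_CPUS ceiling.

static int example_pick_cpu(const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpu;

	/* Copy through the pointer; direct struct assignment no longer applies. */
	cpumask_copy(&mask, affinity);

	/* First online CPU permitted by the affinity mask, if any. */
	cpu = cpumask_first_and(&mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	return cpu;
}
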
@@@ -806,7 -805,7 +806,7 @@@ void fixup_irqs(void
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
 -                                      &irq_desc[irq].affinity);
 +                                      irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
diff --combined arch/sparc/kernel/smp_64.c
@@@ -808,9 -808,9 +808,9 @@@ static void smp_start_sync_tick_client(
  
  extern unsigned long xcall_call_function;
  
- void arch_send_call_function_ipi(cpumask_t mask)
+ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
  {
-       xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
+       xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
  }
  
  extern unsigned long xcall_call_function_single;
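
The hunk above tracks the generic SMP rework that passes the target mask by pointer instead of by value. A minimal sketch of a caller under that interface (illustrative only; example_ping and example_broadcast are hypothetical names, and the snippet assumes the post-conversion smp_call_function_many() prototype):

static void example_ping(void *unused)
{
	/* Runs on each CPU targeted by the mask (not on the calling CPU). */
}

static void example_broadcast(void)
{
	preempt_disable();
	/*
	 * The mask travels as a const struct cpumask *, so no cpumask is
	 * copied onto the stack; the IPI is ultimately delivered through the
	 * pointer-based arch_send_call_function_ipi_mask() hook above.
	 */
	smp_call_function_many(cpu_online_mask, example_ping, NULL, 1);
	preempt_enable();
}
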
@@@ -850,7 -850,7 +850,7 @@@ static void tsb_sync(void *info
  
  void smp_tsb_sync(struct mm_struct *mm)
  {
-       smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
+       smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
  }
  
  extern unsigned long xcall_flush_tlb_mm;
@@@ -1031,7 -1031,7 +1031,7 @@@ void smp_fetch_global_regs(void
   *    If the address space is non-shared (ie. mm->count == 1) we avoid
   *    cross calls when we want to flush the currently running process's
   *    tlb state.  This is done by clearing all cpu bits except the current
 - *    processor's in current->active_mm->cpu_vm_mask and performing the
 + *    processor's in current->mm->cpu_vm_mask and performing the
   *    flush locally only.  This will force any subsequent cpus which run
   *    this task to flush the context from the local tlb if the process
   *    migrates to another cpu (again).
@@@ -1055,13 -1055,13 +1055,13 @@@ void smp_flush_tlb_mm(struct mm_struct 
        int cpu = get_cpu();
  
        if (atomic_read(&mm->mm_users) == 1) {
-               mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
                goto local_flush_and_out;
        }
  
        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
-                             &mm->cpu_vm_mask);
+                             mm_cpumask(mm));
  
  local_flush_and_out:
        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
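
A minimal sketch of the two accessors the smp_flush_tlb_mm() resolution relies on (illustrative only; example_restrict_to_cpu is a hypothetical name): mm_cpumask(mm) exposes mm->cpu_vm_mask as a struct cpumask *, and cpumask_of(cpu) supplies a const struct cpumask * for a single CPU in place of the by-value cpumask_of_cpu().

static void example_restrict_to_cpu(struct mm_struct *mm)
{
	int cpu = get_cpu();

	/*
	 * Narrow the mm's CPU mask to the calling CPU, as the single-user
	 * fast path above does before flushing only the local TLB.
	 */
	cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));

	put_cpu();
}
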
@@@ -1074,12 -1074,12 +1074,12 @@@ void smp_flush_tlb_pending(struct mm_st
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();
  
 -      if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
 +      if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
-               mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,
                                      ctx, nr, (unsigned long) vaddrs,
-                                     &mm->cpu_vm_mask);
+                                     mm_cpumask(mm));
  
        __flush_tlb_pending(ctx, nr, vaddrs);