[SPARC64] mm: Do not flush TLB mm in tlb_finish_mmu()
author    David S. Miller <davem@davemloft.net>
          Mon, 7 Nov 2005 22:09:58 +0000 (14:09 -0800)
committer David S. Miller <davem@davemloft.net>
          Mon, 7 Nov 2005 22:09:58 +0000 (14:09 -0800)
As noted by Hugh Dickins, the TLB flush of the mm in tlb_finish_mmu()
isn't needed any longer.

We still need the flush routines, due to the one remaining
call site in hugetlb_prefault_arch_hook() (a sketch of that
caller's pattern follows the file list below).  That call site
can be eliminated at some later point, however.

Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc64/kernel/smp.c
include/asm-sparc64/tlb.h
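
For context, the surviving caller rewrites the page-size fields of an
address space's context register and then uses smp_flush_tlb_mm() to
shoot down translations loaded under the old bits.  What follows is a
minimal illustrative sketch of that pattern, not the actual hook body:
the real code is hugetlb_prefault_arch_hook() in
arch/sparc64/mm/hugetlbpage.c, the CTX_PGSZ_* constants and the
mm->context.sparc64_ctx_val field are assumed from the sparc64 headers
of this era, and the context-allocation locking is elided.

/* Illustrative sketch only -- not the exact arch hook.  The
 * CTX_PGSZ_* names and mm->context.sparc64_ctx_val are assumed
 * from the sparc64 headers of this period; ctx_alloc_lock
 * locking is elided.
 */
static void set_hugetlb_context_pagesize(struct mm_struct *mm)
{
        unsigned long ctx = mm->context.sparc64_ctx_val;

        ctx &= ~CTX_PGSZ_MASK;
        ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;  /* normal pages */
        ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;  /* huge pages */

        if (ctx != mm->context.sparc64_ctx_val) {
                mm->context.sparc64_ctx_val = ctx;

                /* TLB entries installed under the old page-size
                 * bits are now stale on every cpu that has run
                 * this address space, so flush the whole mm.
                 */
                smp_flush_tlb_mm(mm);
        }
}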

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index a9089e2..5d90ee9 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -839,43 +839,29 @@ void smp_flush_tlb_all(void)
  *    questionable (in theory the big win for threads is the massive sharing of
  *    address space state across processors).
  */
+
+/* This currently is only used by the hugetlb arch pre-fault
+ * hook on UltraSPARC-III+ and later when changing the pagesize
+ * bits of the context register for an address space.
+ */
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-        /*
-         * This code is called from two places, dup_mmap and exit_mmap. In the
-         * former case, we really need a flush. In the later case, the callers
-         * are single threaded exec_mmap (really need a flush), multithreaded
-         * exec_mmap case (do not need to flush, since the caller gets a new
-         * context via activate_mm), and all other callers of mmput() whence
-         * the flush can be optimized since the associated threads are dead and
-         * the mm is being torn down (__exit_mm and other mmput callers) or the
-         * owning thread is dissociating itself from the mm. The
-         * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
-         * for single thread exec and dup_mmap cases. An alternate check might
-         * have been (current->mm != mm).
-         *                                              Kanoj Sarcar
-         */
-        if (atomic_read(&mm->mm_users) == 0)
-                return;
-
-       {
-               u32 ctx = CTX_HWBITS(mm->context);
-               int cpu = get_cpu();
+       u32 ctx = CTX_HWBITS(mm->context);
+       int cpu = get_cpu();
 
-               if (atomic_read(&mm->mm_users) == 1) {
-                       mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-                       goto local_flush_and_out;
-               }
+       if (atomic_read(&mm->mm_users) == 1) {
+               mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+               goto local_flush_and_out;
+       }
 
-               smp_cross_call_masked(&xcall_flush_tlb_mm,
-                                     ctx, 0, 0,
-                                     mm->cpu_vm_mask);
+       smp_cross_call_masked(&xcall_flush_tlb_mm,
+                             ctx, 0, 0,
+                             mm->cpu_vm_mask);
 
-       local_flush_and_out:
-               __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+local_flush_and_out:
+       __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
 
-               put_cpu();
-       }
+       put_cpu();
 }
 
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
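
For readability, here is the resulting smp_flush_tlb_mm() reassembled
from the '+' and context lines of the hunk above; the comments are
editorial additions, not part of the committed source.

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (atomic_read(&mm->mm_users) == 1) {
                /* Single user: restrict the cpu mask to this
                 * cpu and skip the cross call entirely.
                 */
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
        }

        /* Shoot down the context on every other cpu in the mask. */
        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
                              mm->cpu_vm_mask);

local_flush_and_out:
        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);

        put_cpu();
}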
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 66138d9..1eda179 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -78,11 +78,9 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
 {
        tlb_flush_mmu(mp);
 
-       if (mp->fullmm) {
-               if (CTX_VALID(mp->mm->context))
-                       do_flush_tlb_mm(mp->mm);
+       if (mp->fullmm)
                mp->fullmm = 0;
-       } else
+       else
                flush_tlb_pending();
 
        /* keep the page table cache within bounds */
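
Likewise, here is the resulting tlb_finish_mmu() reassembled from the
hunk above.  The signature is completed from the hunk header, the
comments are editorial, and the page-table-cache trimming that follows
the final comment is elided, as in the diff.

static inline void tlb_finish_mmu(struct mmu_gather *mp,
                                  unsigned long start, unsigned long end)
{
        tlb_flush_mmu(mp);

        /* On a full-mm teardown, the context-wide TLB flush that
         * used to live here is no longer needed.
         */
        if (mp->fullmm)
                mp->fullmm = 0;
        else
                flush_tlb_pending();

        /* keep the page table cache within bounds */
        /* ... page-table-cache trimming elided, as in the diff ... */
}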