sh: Local TLB flushing variants for SMP prep.
author		Paul Mundt <lethal@linux-sh.org>
		Mon, 25 Dec 2006 10:28:54 +0000 (19:28 +0900)
committer	Paul Mundt <lethal@linux-sh.org>
		Tue, 13 Feb 2007 01:54:45 +0000 (10:54 +0900)
Rename the existing flush routines to local_ variants for use by
the IPI-backed global flush routines on SMP.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
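
(For context, not part of this patch: a minimal sketch of how an SMP build could layer the IPI-backed
global routines on top of these local_ variants. The argument struct, the helper names, and the
on_each_cpu() call are illustrative assumptions only; exact SMP helper signatures vary by kernel
version.)

#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Per-flush arguments handed to the IPI handler (names are hypothetical). */
struct tlb_flush_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};

/* Runs on every CPU; performs only the purely local flush. */
static void ipi_flush_tlb_page(void *info)
{
	struct tlb_flush_args *args = info;

	local_flush_tlb_page(args->vma, args->addr);
}

/* Possible SMP-wide flush_tlb_page(): broadcast the local flush and wait. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct tlb_flush_args args = { .vma = vma, .addr = page };

	on_each_cpu(ipi_flush_tlb_page, &args, 1);
}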
arch/sh/kernel/sh_ksyms.c
arch/sh/mm/init.c
arch/sh/mm/pg-sh4.c
arch/sh/mm/tlb-flush.c
arch/sh/mm/tlb-nommu.c
arch/sh/mm/tlb-sh3.c
arch/sh/mm/tlb-sh4.c
include/asm-sh/tlbflush.h

diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index e610623..fe1b276 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(__flush_purge_region);
 EXPORT_SYMBOL(clear_user_page);
 #endif
 
-EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(__down_trylock);
 
 #ifdef CONFIG_SMP
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d172065..ae957a9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -106,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-       __flush_tlb_page(get_asid(), addr);
+       flush_tlb_one(get_asid(), addr);
 }
 
 /*
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index b529d80..969efec 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -39,7 +39,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
                mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
                set_pte(pte, entry);
                local_irq_save(flags);
-               __flush_tlb_page(get_asid(), p3_addr);
+               flush_tlb_one(get_asid(), p3_addr);
                local_irq_restore(flags);
                update_mmu_cache(NULL, p3_addr, entry);
                __clear_user_page((void *)p3_addr, to);
@@ -74,7 +74,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
                mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
                set_pte(pte, entry);
                local_irq_save(flags);
-               __flush_tlb_page(get_asid(), p3_addr);
+               flush_tlb_one(get_asid(), p3_addr);
                local_irq_restore(flags);
                update_mmu_cache(NULL, p3_addr, entry);
                __copy_user_page((void *)p3_addr, from, to);
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index b829c17..dcaf98e 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -14,7 +14,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
        unsigned int cpu = smp_processor_id();
 
@@ -31,15 +31,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
                        saved_asid = get_asid();
                        set_asid(asid);
                }
-               __flush_tlb_page(asid, page);
+               flush_tlb_one(asid, page);
                if (saved_asid != MMU_NO_ASID)
                        set_asid(saved_asid);
                local_irq_restore(flags);
        }
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                    unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                          unsigned long end)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned int cpu = smp_processor_id();
@@ -67,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                set_asid(asid);
                        }
                        while (start < end) {
-                               __flush_tlb_page(asid, start);
+                               flush_tlb_one(asid, start);
                                start += PAGE_SIZE;
                        }
                        if (saved_asid != MMU_NO_ASID)
@@ -77,7 +77,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        }
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
@@ -86,7 +86,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-               flush_tlb_all();
+               local_flush_tlb_all();
        } else {
                unsigned long asid;
                unsigned long saved_asid = get_asid();
@@ -97,7 +97,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                end &= PAGE_MASK;
                set_asid(asid);
                while (start < end) {
-                       __flush_tlb_page(asid, start);
+                       flush_tlb_one(asid, start);
                        start += PAGE_SIZE;
                }
                set_asid(saved_asid);
@@ -105,7 +105,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
        local_irq_restore(flags);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
        unsigned int cpu = smp_processor_id();
 
@@ -122,7 +122,7 @@ void flush_tlb_mm(struct mm_struct *mm)
        }
 }
 
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
        unsigned long flags, status;
 
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index e55cfea..1ccca7c 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
 /*
  * Nothing too terribly exciting here ..
  */
-
-void flush_tlb(void)
-{
-       BUG();
-}
-
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
        BUG();
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
        BUG();
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
 {
        BUG();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
        BUG();
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
        BUG();
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        BUG();
 }
@@ -55,4 +49,3 @@ void update_mmu_cache(struct vm_area_struct * vma,
 {
        BUG();
 }
-
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 598c998..e5e76eb 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -12,7 +12,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
        unsigned long addr, data;
        int i, ways = MMU_NTLB_WAYS;
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 758d8de..221e709 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -12,7 +12,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
        unsigned long addr, data;
 
diff --git a/include/asm-sh/tlbflush.h b/include/asm-sh/tlbflush.h
index 28c073b..455fb8d 100644
--- a/include/asm-sh/tlbflush.h
+++ b/include/asm-sh/tlbflush.h
@@ -4,7 +4,6 @@
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+                                 unsigned long start,
+                                 unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+                                unsigned long page);
+extern void local_flush_tlb_kernel_range(unsigned long start,
+                                        unsigned long end);
+extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
+
+#ifdef CONFIG_SMP
 
-extern void flush_tlb(void);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern void __flush_tlb_page(unsigned long asid, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_one(unsigned long asid, unsigned long page);
+
+#else
+
+#define flush_tlb_all()                        local_flush_tlb_all()
+#define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)      local_flush_tlb_page(vma, page)
+#define flush_tlb_one(asid, page)      local_flush_tlb_one(asid, page)
+
+#define flush_tlb_range(vma, start, end)       \
+       local_flush_tlb_range(vma, start, end)
+
+#define flush_tlb_kernel_range(start, end)     \
+       local_flush_tlb_kernel_range(start, end)
+
+#endif /* CONFIG_SMP */
 
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
-{ /* Nothing to do */
+{
+       /* Nothing to do */
 }
-
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif /* __ASM_SH_TLBFLUSH_H */
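
(Usage note, illustrative and not from the patch: on a UP build the new #else branch makes the rename
invisible to callers, so e.g. the set_pte_phys() hunk above compiles straight down to the local
routine. The helper below is hypothetical and only demonstrates the expansion.)

#include <asm/mmu_context.h>	/* get_asid() */
#include <asm/tlbflush.h>

/* Hypothetical helper mirroring the set_pte_phys() change above. */
static void flush_new_kernel_mapping(unsigned long addr)
{
	/*
	 * !CONFIG_SMP: expands to local_flush_tlb_one(get_asid(), addr).
	 * CONFIG_SMP:  will resolve to the IPI-backed flush_tlb_one()
	 *              declared above once it is implemented.
	 */
	flush_tlb_one(get_asid(), addr);
}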