sh64: Wire up the shared __flush_xxx_region() flushers.
author Paul Mundt <lethal@linux-sh.org>
Fri, 14 Aug 2009 17:00:54 +0000 (02:00 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Fri, 14 Aug 2009 17:00:54 +0000 (02:00 +0900)
Now with all of the prep work out of the way, kill off the SH-5 variants
and use the SH-4 version directly. This also takes advantage of the
unrolling that was previously done for the new version.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/mm/Makefile_64
arch/sh/mm/cache-sh5.c

index 2863ffb..66c3910 100644 (file)
@@ -9,7 +9,7 @@ mmu-$(CONFIG_MMU)       := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
                           extable_64.o
 
 ifndef CONFIG_CACHE_OFF
-obj-y                  += cache-sh5.o
+obj-y                  += cache-sh5.o flush-sh4.o
 endif
 
 obj-y                  += $(mmu-y)
index 3e2d732..698113f 100644 (file)
@@ -539,54 +539,6 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
                sh64_dcache_purge_user_pages(mm, start, end);
        }
 }
-
-/*
- * Purge the range of addresses from the D-cache.
- *
- * The addresses lie in the superpage mapping. There's no harm if we
- * overpurge at either end - just a small performance loss.
- */
-void __flush_purge_region(void *start, int size)
-{
-       unsigned long long ullend, addr, aligned_start;
-
-       aligned_start = (unsigned long long)(signed long long)(signed long) start;
-       addr = L1_CACHE_ALIGN(aligned_start);
-       ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-       while (addr <= ullend) {
-               __asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
-               addr += L1_CACHE_BYTES;
-       }
-}
-
-void __flush_wback_region(void *start, int size)
-{
-       unsigned long long ullend, addr, aligned_start;
-
-       aligned_start = (unsigned long long)(signed long long)(signed long) start;
-       addr = L1_CACHE_ALIGN(aligned_start);
-       ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-       while (addr < ullend) {
-               __asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-               addr += L1_CACHE_BYTES;
-       }
-}
-
-void __flush_invalidate_region(void *start, int size)
-{
-       unsigned long long ullend, addr, aligned_start;
-
-       aligned_start = (unsigned long long)(signed long long)(signed long) start;
-       addr = L1_CACHE_ALIGN(aligned_start);
-       ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-       while (addr < ullend) {
-               __asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
-               addr += L1_CACHE_BYTES;
-       }
-}
 #endif /* !CONFIG_DCACHE_DISABLED */
 
 /*