diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 171951d..b4923a7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/linkage.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
  *  o collapses to normal function call on UP kernels
  *  o collapses to normal function call on systems with a single shared
  *    primary cache.
+ *  o doesn't disable interrupts on the local CPU
  */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
        preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-       smp_call_function(func, info, wait);
+       smp_call_function(func, info, 1);
 #endif
        func(info);
        preempt_enable();
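
The comment block above describes r4k_on_each_cpu(): with the wait argument dropped, the cross-CPU smp_call_function() path (compiled in only for non-MT SMP configurations) is always synchronous, and the callback then runs on the local CPU with preemption disabled but interrupts left enabled. As a rough user-space sketch of that calling pattern only, not of the kernel API, the following uses made-up stand-ins (fake_smp_call_function(), flush_cb()):

/* Hypothetical user-space model of the r4k_on_each_cpu() calling pattern:
 * the callback takes an opaque void *info, is first dispatched to the
 * other CPUs (always waiting for completion, as wait is now hard-coded
 * to 1), and then runs on the local CPU. */
#include <stdio.h>

struct flush_args {
        unsigned long start, end;
};

/* stand-in for smp_call_function(func, info, 1); here it just runs the
 * callback once so the example stays self-contained */
static void fake_smp_call_function(void (*func)(void *), void *info)
{
        func(info);             /* pretend this ran on another CPU */
}

static void flush_cb(void *info)
{
        struct flush_args *args = info;

        printf("flush 0x%lx-0x%lx\n", args->start, args->end);
}

int main(void)
{
        struct flush_args args = { 0x1000, 0x2000 };

        fake_smp_call_function(flush_cb, &args);        /* remote CPUs */
        flush_cb(&args);                                /* local CPU */
        return 0;
}

Every call site later in this diff follows the same shape: a local_r4k_* helper taking a void *info, handed to r4k_on_each_cpu() without the former wait argument.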
@@ -100,6 +101,12 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
        blast_dcache32_page(addr);
 }
 
+static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
+{
+       R4600_HIT_CACHEOP_WAR_IMPL;
+       blast_dcache64_page(addr);
+}
+
 static void __cpuinit r4k_blast_dcache_page_setup(void)
 {
        unsigned long  dc_lsize = cpu_dcache_line_size();
@@ -110,6 +117,8 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
+       else if (dc_lsize == 64)
+               r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 }
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
@@ -124,6 +133,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
+       else if (dc_lsize == 64)
+               r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
 static void (* r4k_blast_dcache)(void);
@@ -138,6 +149,8 @@ static void __cpuinit r4k_blast_dcache_setup(void)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
+       else if (dc_lsize == 64)
+               r4k_blast_dcache = blast_dcache64;
 }
 
 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
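
The three *_setup() hunks above all extend the same boot-time dispatch: the D-cache line size is inspected once and the matching blast routine is parked in a function pointer, so the hot paths make a plain indirect call instead of re-checking the size. A minimal sketch of that pattern, under assumed names (blast16/32/64 and blast_setup() are illustrative, not kernel symbols):

/* Illustrative sketch of the line-size dispatch done by
 * r4k_blast_dcache_page_setup() and friends. */
#include <stdio.h>

static void blast16(unsigned long addr) { printf("16-byte lines: %lx\n", addr); }
static void blast32(unsigned long addr) { printf("32-byte lines: %lx\n", addr); }
static void blast64(unsigned long addr) { printf("64-byte lines: %lx\n", addr); }

static void (*blast_dcache_page)(unsigned long addr);

static void blast_setup(unsigned long dc_lsize)
{
        if (dc_lsize == 16)
                blast_dcache_page = blast16;
        else if (dc_lsize == 32)
                blast_dcache_page = blast32;
        else if (dc_lsize == 64)        /* the newly handled size */
                blast_dcache_page = blast64;
}

int main(void)
{
        blast_setup(64);                 /* done once, at cache probe time */
        blast_dcache_page(0x80001000UL); /* hot path: plain indirect call */
        return 0;
}

The new dc_lsize == 64 branches simply add one more candidate for machines with 64-byte D-cache lines; for the per-page flush that candidate is r4k_blast_dcache_page_dc64() above, which wraps blast_dcache64_page() in R4600_HIT_CACHEOP_WAR_IMPL the same way the 32-byte variant does.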
@@ -350,7 +363,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +410,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
        int exec = vma->vm_flags & VM_EXEC;
 
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-               r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+               r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +442,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
        if (!cpu_has_dc_aliases)
                return;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 }
 
 struct flush_cache_page_args {
@@ -521,7 +534,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
        args.addr = addr;
        args.pfn = pfn;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -534,8 +547,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
-               r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-                               1);
+               r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 }
 
 struct flush_icache_range_args {
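
The r4k_flush_data_cache_page() hunk above keeps its existing special case: in atomic context the flush stays on the local CPU only, otherwise it is broadcast through r4k_on_each_cpu(); the change merely drops the now-implicit wait argument. A toy user-space model of that gating, with invented names (in_atomic_ctx, local_flush(), flush_on_each_cpu()):

/* Toy model of the "local CPU only when atomic" gating kept by
 * r4k_flush_data_cache_page(); every name here is illustrative. */
#include <stdbool.h>
#include <stdio.h>

static bool in_atomic_ctx;              /* stand-in for in_atomic() */

static void local_flush(void *addr)
{
        printf("flush page at %p on this CPU\n", addr);
}

/* stand-in for r4k_on_each_cpu(); here it simply runs the callback once */
static void flush_on_each_cpu(void (*func)(void *), void *info)
{
        func(info);
}

static void flush_data_cache_page(unsigned long addr)
{
        if (in_atomic_ctx)
                local_flush((void *)addr);      /* must not wait on other CPUs here */
        else
                flush_on_each_cpu(local_flush, (void *)addr);
}

int main(void)
{
        flush_data_cache_page(0x1000);
        in_atomic_ctx = true;
        flush_data_cache_page(0x2000);
        return 0;
}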
@@ -576,7 +588,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
        args.start = start;
        args.end = end;
 
-       r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+       r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
        instruction_hazard();
 }
 
@@ -697,7 +709,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 }
 
 static void r4k_flush_icache_all(void)