score: add flush_dcache_page and PG_dcache_dirty define
authorChen Liqin <liqin.chen@sunplusct.com>
Wed, 18 Nov 2009 05:22:33 +0000 (13:22 +0800)
committerChen Liqin <liqin.chen@sunplusct.com>
Thu, 17 Dec 2009 10:28:31 +0000 (18:28 +0800)
Signed-off-by: Cui Bixiong <bixiong@sunnorth.com.cn>
Signed-off-by: Chen Liqin <liqin.chen@sunplusct.com>
modified:   arch/score/include/asm/cacheflush.h
modified:   arch/score/mm/cache.c

arch/score/include/asm/cacheflush.h
arch/score/mm/cache.c

index caaba24..1d545d0 100644 (file)
@@ -14,10 +14,12 @@ extern void flush_cache_sigtramp(unsigned long addr);
 extern void flush_icache_all(void);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_dcache_range(unsigned long start, unsigned long end);
+extern void flush_dcache_page(struct page *page);
+
+#define PG_dcache_dirty         PG_arch_1
 
 #define flush_cache_dup_mm(mm)                 do {} while (0)
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                        do {} while (0)
 #define flush_dcache_mmap_lock(mapping)                do {} while (0)
 #define flush_dcache_mmap_unlock(mapping)      do {} while (0)
 #define flush_cache_vmap(start, end)           do {} while (0)
index dbac9d9..b25e957 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/fs.h>
 
 #include <asm/mmu_context.h>
 
@@ -51,6 +52,27 @@ static void flush_data_cache_page(unsigned long addr)
        }
 }
 
+void flush_dcache_page(struct page *page)
+{
+       struct address_space *mapping = page_mapping(page);
+       unsigned long addr;
+
+       if (PageHighMem(page))
+               return;
+       if (mapping && !mapping_mapped(mapping)) {
+               set_bit(PG_dcache_dirty, &(page)->flags);
+               return;
+       }
+
+       /*
+        * We could delay the flush for the !page_mapping case too.  But that
+        * case is for exec env/arg pages and those are %99 certainly going to
+        * get faulted into the tlb (and thus flushed) anyways.
+        */
+       addr = (unsigned long) page_address(page);
+       flush_data_cache_page(addr);
+}
+
 /* called by update_mmu_cache. */
 void __update_cache(struct vm_area_struct *vma, unsigned long address,
                pte_t pte)
@@ -63,11 +85,11 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
-       if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
+       if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
                addr = (unsigned long) page_address(page);
                if (exec)
                        flush_data_cache_page(addr);
-               clear_bit(PG_arch_1, &page->flags);
+               clear_bit(PG_dcache_dirty, &(page)->flags);
        }
 }