1 From 6d3e6d3640052cac958d61c44597cc216f6ee09f Mon Sep 17 00:00:00 2001
2 From: Nicolas Pitre <nicolas.pitre@linaro.org>
3 Date: Thu, 16 Dec 2010 14:56:34 -0500
4 Subject: [PATCH 05/66] ARM: fix cache-feroceon-l2 after stack based kmap_atomic()
6 Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively
7 wrong to rely on fixed kmap type indices (namely KM_L2_CACHE) as
8 kmap_atomic() totally ignores them and a concurrent instance of it may
9 happily reuse any slot for any purpose. Because kmap_atomic() is now
10 able to deal with reentrancy, we can get rid of the ad hoc mapping here.
12 While the code is made much simpler, there is a needless cache flush
13 introduced by the usage of __kunmap_atomic(). It is not clear whether
14 removing that extra flush would be worth the code maintenance cost
15 (I don't think there are that many highmem users on that
16 platform anyway) but that should be reconsidered when/if someone cares
17 enough to do some measurements.
19 Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
21 arch/arm/mm/cache-feroceon-l2.c | 37 +++++++++++++++++++------------------
22 1 file changed, 19 insertions(+), 18 deletions(-)
24 diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
25 index 6e77c04..e0b0e7a 100644
26 --- a/arch/arm/mm/cache-feroceon-l2.c
27 +++ b/arch/arm/mm/cache-feroceon-l2.c
31 #include <linux/init.h>
32 +#include <linux/highmem.h>
33 #include <asm/cacheflush.h>
34 -#include <asm/kmap_types.h>
35 -#include <asm/fixmap.h>
36 -#include <asm/pgtable.h>
37 -#include <asm/tlbflush.h>
38 #include <plat/cache-feroceon-l2.h>
42 * Low-level cache maintenance operations.
44 * between which we don't want to be preempted.
47 -static inline unsigned long l2_start_va(unsigned long paddr)
48 +static inline unsigned long l2_get_va(unsigned long paddr)
52 - * Let's do our own fixmap stuff in a minimal way here.
53 * Because range ops can't be done on physical addresses,
54 * we simply install a virtual mapping for it only for the
55 * TLB lookup to occur, hence no need to flush the untouched
56 - * memory mapping. This is protected with the disabling of
57 - * interrupts by the caller.
58 + * memory mapping afterwards (note: a cache flush may happen
59 + * in some circumstances depending on the path taken in kunmap_atomic).
61 - unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
62 - unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
63 - set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
64 - local_flush_tlb_kernel_page(vaddr);
65 - return vaddr + (paddr & ~PAGE_MASK);
66 + void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
67 + return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
69 return __phys_to_virt(paddr);
73 +static inline void l2_put_va(unsigned long vaddr)
75 +#ifdef CONFIG_HIGHMEM
76 + kunmap_atomic((void *)vaddr);
80 static inline void l2_clean_pa(unsigned long addr)
82 __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
83 @@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
85 BUG_ON((start ^ end) >> PAGE_SHIFT);
87 - raw_local_irq_save(flags);
88 - va_start = l2_start_va(start);
89 + va_start = l2_get_va(start);
90 va_end = va_start + (end - start);
91 + raw_local_irq_save(flags);
92 __asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
93 "mcr p15, 1, %1, c15, c9, 5"
94 : : "r" (va_start), "r" (va_end));
95 raw_local_irq_restore(flags);
96 + l2_put_va(va_start);
99 static inline void l2_clean_inv_pa(unsigned long addr)
100 @@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
102 BUG_ON((start ^ end) >> PAGE_SHIFT);
104 - raw_local_irq_save(flags);
105 - va_start = l2_start_va(start);
106 + va_start = l2_get_va(start);
107 va_end = va_start + (end - start);
108 + raw_local_irq_save(flags);
109 __asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
110 "mcr p15, 1, %1, c15, c11, 5"
111 : : "r" (va_start), "r" (va_end));
112 raw_local_irq_restore(flags);
113 + l2_put_va(va_start);
116 static inline void l2_inv_all(void)