/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

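/*
 * Worked example (not from the original source): with 4KiB pages, a
 * 512KiB range covers 128 pages. 128 >= MAX_DCACHE_PAGES (64), so the
 * ranged D-cache paths below give up and flush the whole cache, which
 * is cheaper than walking that many page-table entries.
 */
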
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader, signal
 * handler setup and the kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	int icacheaddr;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(args);
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES-1);
		end += L1_CACHE_BYTES-1;
		end &= ~(L1_CACHE_BYTES-1);

		local_irq_save(flags);
		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
			/* Write the D-cache line back to memory */
			asm volatile("ocbwb	%0"
				     : /* no output */
				     : "m" (__m(v)));

			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
					v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
				i++, icacheaddr += cpu_data->icache.way_incr)
					/* Clear i-cache line valid-bit */
					ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
		local_irq_restore(flags);
	}
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}

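/*
 * Illustrative note (not from the original source): P1 and P2 are the
 * fixed identity mappings of physical memory on SH-4, cached and
 * uncached respectively, offset from each other by 0x20000000. That is
 * why exec_offset above is either 0 or 0x20000000: adding it to the
 * flush code's P1 address yields the equivalent uncached P2 address to
 * execute from. E.g. code at 0x8c001000 (P1) runs uncached at
 * 0xac001000 (P2).
 */
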
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

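/*
 * Worked example (not from the original source): with a 16KiB cache
 * way and 4KiB pages, n_aliases = 16KiB / 4KiB = 4. The loop above
 * then hits the operand-cache address array at colour offsets 0x0000,
 * 0x1000, 0x2000 and 0x3000, covering every virtual colour under which
 * the physical page may have been cached.
 */
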
/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}

static inline void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;
			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				/* Record the colours of both mappings */
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);
				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}
		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

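/*
 * Worked example (not from the original source): with a 16KiB way and
 * 4KiB pages, alias_mask covers address bits 12-13 and there are four
 * colours. A page mapped at virtual ...3000 but physically at ...1000
 * sets bits 3 and 1 of 'd', so only those two colour-sized segments
 * get flushed; and once d == all_aliases_mask (0xf) the page-table
 * walk can stop early, since every colour must be flushed anyway.
 */
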
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long address, pfn, phys;
	unsigned int alias_mask;

	vma = data->vma;
	address = data->addr1;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush D-cache when we have alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual). There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

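/*
 * Illustrative note (not from the original source): stale lines for
 * the page can exist under two different colours at once - the one
 * implied by the user's virtual 'address' and the one implied by the
 * kernel's P1 view of 'phys'. That is why the alias case above issues
 * two flush_cache_4096() calls, one per colour, rather than one.
 */
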
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see
		 * if this is really needed.
		 */
		flush_icache_all();
	}
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *		 region.
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * If I write "=r" for the (temp_pc), it puts this in r6 hence
	 * trashing exec_offset before it's been added on - why? Hence
	 * "=&r" as a 'workaround'.
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

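/*
 * Worked example (not from the original source): assuming 32-byte
 * cache lines, a 4KiB page spans 128 lines per way. Each pass of the
 * inner loop above covers 64 bytes (two lines) with two address-array
 * writes, i.e. 64 iterations and 128 writes per way.
 */
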
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 *
 * We want to eliminate unnecessary bus transactions, so this code uses
 * a non-obvious technique.
 *
 * Loop over a cache way sized block of memory, one cache line at a time.
 * For each line, use movca.l to cause the current cache line contents to
 * be written back, but without reading anything from main memory. However
 * this has the side effect that the cache is now caching that memory
 * location. So follow this with a cache invalidate to mark the cache line
 * invalid. And do all this with interrupts disabled, to avoid the cache
 * line being accidentally evicted while it is holding garbage.
 *
 * This also breaks in a number of circumstances:
 * - if there are modifications to the region of memory just above
 *   empty_zero_page (for example because a breakpoint has been placed
 *   there), then these can be lost.
 *
 *   This is because the memory address which the cache temporarily
 *   caches in the above description is empty_zero_page. So the
 *   movca.l hits the cache (it is assumed that it misses, or at least
 *   isn't dirty), modifies the line and then invalidates it, losing the
 *   original contents of the memory suddenly hit.
 *
 * - If caches are disabled or configured in write-through mode, then
 *   the movca.l writes garbage directly into memory.
 */
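/*
 * Illustrative sketch (not from the original source): the primitive the
 * comment above describes, reduced to a single cache line; the helper
 * name is hypothetical. movca.l allocates and dirties a line for 'addr'
 * without reading memory, evicting (and writing back, if dirty)
 * whatever previously occupied that cache entry; ocbi then invalidates
 * the freshly allocated line so its garbage contents never reach
 * memory.
 */
#if 0	/* example only */
static inline void writeback_and_invalidate_line(unsigned long addr)
{
	asm volatile("movca.l r0, @%0\n\t"	/* force eviction/write-back */
		     "ocbi @%0"			/* discard the garbage line */
		     : : "r" (addr));
}
#endif
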
static void __flush_dcache_segment_writethrough(unsigned long start,
						unsigned long extent_per_way)
{
	unsigned long addr;
	int i;

	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);

	while (extent_per_way) {
		for (i = 0; i < cpu_data->dcache.ways; i++)
			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);

		addr += cpu_data->dcache.linesz;
		extent_per_way -= cpu_data->dcache.linesz;
	}
}

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches. Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);

	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));

	if (wt_enabled)
		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
	else {
		switch (boot_cpu_data.dcache.ways) {
		case 1:
			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
			break;
		case 2:
			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
			break;
		case 4:
			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
			break;
		default:
			panic("unknown number of cache ways\n");
			break;
		}
	}

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}
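
/*
 * Illustrative sketch (not part of this file): roughly how the generic
 * arch/sh cache code is expected to invoke the local_* hooks installed
 * above, packing the arguments into a struct flusher_data. The exact
 * calling convention lives in arch/sh/mm/cache.c; this is a sketch
 * from memory, not a verbatim copy.
 */
#if 0	/* example only */
static void example_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data = {
		.vma	= NULL,
		.addr1	= start,	/* sh4_flush_icache_range reads addr1/addr2 */
		.addr2	= end,
	};

	local_flush_icache_range(&data);
}
#endif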