Merge branch 'stable-3.2' into pandora-3.2
[pandora-kernel.git] / arch / arm / mm / idmap.c
1 #include <linux/kernel.h>
2
3 #include <asm/cputype.h>
4 #include <asm/idmap.h>
5 #include <asm/pgalloc.h>
6 #include <asm/pgtable.h>
7 #include <asm/sections.h>
8
9 pgd_t *idmap_pgd;
10
11 #ifdef CONFIG_ARM_LPAE
12 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
13         unsigned long prot)
14 {
15         pmd_t *pmd;
16         unsigned long next;
17
18         if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
19                 pmd = pmd_alloc_one(&init_mm, addr);
20                 if (!pmd) {
21                         pr_warning("Failed to allocate identity pmd.\n");
22                         return;
23                 }
24                 pud_populate(&init_mm, pud, pmd);
25                 pmd += pmd_index(addr);
26         } else
27                 pmd = pmd_offset(pud, addr);
28
29         do {
30                 next = pmd_addr_end(addr, end);
31                 *pmd = __pmd((addr & PMD_MASK) | prot);
32                 flush_pmd_entry(pmd);
33         } while (pmd++, addr = next, addr != end);
34 }
35 #else   /* !CONFIG_ARM_LPAE */
36 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
37         unsigned long prot)
38 {
39         pmd_t *pmd = pmd_offset(pud, addr);
40
41         addr = (addr & PMD_MASK) | prot;
42         pmd[0] = __pmd(addr);
43         addr += SECTION_SIZE;
44         pmd[1] = __pmd(addr);
45         flush_pmd_entry(pmd);
46 }
47 #endif  /* CONFIG_ARM_LPAE */
48
49 static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
50         unsigned long prot)
51 {
52         pud_t *pud = pud_offset(pgd, addr);
53         unsigned long next;
54
55         do {
56                 next = pud_addr_end(addr, end);
57                 idmap_add_pmd(pud, addr, next, prot);
58         } while (pud++, addr = next, addr != end);
59 }
60
61 static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
62 {
63         unsigned long prot, next;
64
65         prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
66         if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
67                 prot |= PMD_BIT4;
68
69         pgd += pgd_index(addr);
70         do {
71                 next = pgd_addr_end(addr, end);
72                 idmap_add_pud(pgd, addr, next, prot);
73         } while (pgd++, addr = next, addr != end);
74 }
75
76 extern char  __idmap_text_start[], __idmap_text_end[];
77
78 static int __init init_static_idmap(void)
79 {
80         phys_addr_t idmap_start, idmap_end;
81
82         idmap_pgd = pgd_alloc(&init_mm);
83         if (!idmap_pgd)
84                 return -ENOMEM;
85
86         /* Add an identity mapping for the physical address of the section. */
87         idmap_start = virt_to_phys((void *)__idmap_text_start);
88         idmap_end = virt_to_phys((void *)__idmap_text_end);
89
90         pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
91                 (long long)idmap_start, (long long)idmap_end);
92         identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
93
94         return 0;
95 }
96 early_initcall(init_static_idmap);
97
/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.  The sequence below is order
 * sensitive: caches are cleaned before the table switch, and the TLB
 * is flushed after it.
 */
void setup_mm_for_reboot(void)
{
	/* Clean and invalidate L1 so nothing dirty is lost when the MMU goes off. */
	flush_cache_all();

	/* Switch to the identity mapping built by init_static_idmap(). */
	cpu_switch_mm(idmap_pgd, &init_mm);

	/* Flush the TLB to drop translations cached from the old tables. */
	local_flush_tlb_all();
}