// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>
#include <asm/armv7_mpu.h>

#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_SYS_ARM_MMU
__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}
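
/*
 * Worked example (illustrative, assuming non-LPAE 1 MB sections): a call
 * such as set_section_dcache(0x123, DCACHE_OFF) builds
 *   value = (0x123 << 20) | TTB_SECT_AP | <DCACHE_OFF bits>
 * and stores it at page_table[0x123], i.e. it identity-maps the 1 MB
 * region starting at 0x12300000 with the requested cache policy.
 */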

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	/* div by 2 before start + size to avoid phys_addr_t overflow */
	end = ALIGN((start / 2) + (size / 2), MMU_SECTION_SIZE / 2)
	      >> (MMU_SECTION_SHIFT - 1);
	start = start >> MMU_SECTION_SHIFT;
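
	/*
	 * Worked example (illustrative, 1 MB sections): start = 0xfff00000,
	 * size = 0x100000 would overflow a 32-bit start + size. Halving
	 * first stays in range:
	 *   ALIGN(0x7ff80000 + 0x80000, 0x80000) >> 19 = 0x1000,
	 * one past the last section, while start >> 20 = 0xfff.
	 */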

#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure the range is cache-line aligned; since only the CPU
	 * maintains the page tables, it is safe to always flush complete
	 * cache lines.
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}
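
/*
 * Typical use (illustrative; buf_base and buf_size are hypothetical
 * names): a board that shares a buffer with a DMA master could call
 *   mmu_set_region_dcache_behaviour(buf_base, buf_size, DCACHE_OFF);
 * the region is widened to MMU_SECTION_SIZE granularity as shown above.
 */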

__weak void dram_bank_mmu_setup(int bank)
{
	struct bd_info *bd = gd->bd;
	int i;

	/* bd->bi_dram is available only after relocation */
	if ((gd->flags & GD_FLG_RELOC) == 0)
		return;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++)
		set_section_dcache(i, DCACHE_DEFAULT_OPTION);
}
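
/*
 * Note: mmu_setup() below first maps all 4 GB uncached, then calls this
 * weak function once per bank, so only DRAM ends up with
 * DCACHE_DEFAULT_OPTION while MMIO regions stay uncached unless a board
 * overrides it.
 */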

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}
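
	/*
	 * Resulting layout (as implied above): four 4 KB second-level
	 * tables at tlb_addr + 0..16 KB, each mapping 1 GB in 2 MB blocks,
	 * and a first-level table at tlb_addr + 16 KB whose four entries
	 * point at them.
	 */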

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
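
	/*
	 * Note: op1 = 4 in the cp15 encodings above selects the HYP copies
	 * of these registers (HTCR/HTTBR/HMAIR0), op1 = 0 the usual
	 * TTBCR/TTBR0/MAIR0; both program the same first-level table at
	 * tlb_addr + 16 KB.
	 */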
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}
#endif /* CONFIG_SYS_ARM_MMU */

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu/mpu is enabled too */
#ifdef CONFIG_SYS_ARM_MMU
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
#elif defined(CONFIG_SYS_ARM_MPU)
	if ((cache_bit == CR_C) && !mpu_enabled()) {
		printf("Consider enabling MPU before enabling caches\n");
		return;
	}
#endif
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}
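
/*
 * Note (per cache_enable() above): dcache_enable() builds the page
 * tables and turns on the MMU via mmu_setup() on first use, so callers
 * never need to invoke mmu_setup() themselves.
 */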

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if the cache isn't enabled, no need to disable it */
		if ((reg & CR_C) != CR_C)
			return;
#ifdef CONFIG_SYS_ARM_MMU
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
#endif
	}
	reg = get_cr();

#ifdef CONFIG_SYS_ARM_MMU
	if (cache_bit == (CR_C | CR_M))
#elif defined(CONFIG_SYS_ARM_MPU)
	if (cache_bit == CR_C)
#endif
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

#if CONFIG_IS_ENABLED(SYS_ICACHE_OFF)
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;	/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#if CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;	/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif
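
/*
 * Typical call sequence (illustrative): board init code enables the
 * I-cache early, then the D-cache once gd->arch.tlb_addr points at the
 * reserved page-table memory; dcache_disable() is used before handing
 * off to an OS and, with CONFIG_SYS_ARM_MMU, drops CR_M along with CR_C.
 */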