// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <fsl_ddr_sdram.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <fsl_qbman.h>
#include <environment.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000

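/*
 * Two region tables are kept below: early_map describes the mappings used
 * while U-Boot still runs from OCRAM, before DDR is initialized, and
 * final_map is the richer set installed by final_mmu_setup() once DRAM is
 * available.
 */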
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
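
/*
 * Note that early_map deliberately maps DDR as nGnRnE device memory with
 * PXN/UXN (unless TF-A or SPL has already initialized DRAM), which blocks
 * speculative accesses to uninitialized DDR. update_early_mmu_table() below
 * re-marks DDR as normal memory once its real size is known.
 */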
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},	/* list terminator */
};

struct mm_region *mem_map = early_map;
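
/*
 * mem_map is the table walked by the generic ARMv8 MMU code; it points at
 * early_map until final_mmu_setup() switches the live tables over to
 * final_map.
 */
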
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mappings will be skipped when creating the
		 * MMU table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		/*
		 * Only use gd->arch.secure_ram if the address is
		 * recalculated. Align to 4KB for MMU table.
		 */
		/* put page tables in secure ram */
		index = ARRAY_SIZE(final_map) - 2;
		gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
		final_map[index].virt = gd->arch.secure_ram & ~0x3;
		final_map[index].phys = final_map[index].virt;
		final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
		final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
		tlb_addr_save = gd->arch.tlb_addr;
	} else {
		/* Use allocated (board_f.c) memory for TLB */
		tlb_addr_save = gd->arch.tlb_allocated;
		gd->arch.tlb_addr = tlb_addr_save;
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * booting. It is not necessary to run if performance is not critical.
	 * Skip it if the MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
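
/*
 * Boot-source handling for TFABOOT: the RCW source field read from PORSR1 is
 * decoded into an enum boot_src, which is then used below to select the MMC
 * environment device and the environment location.
 */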
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC eMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}
	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}
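
/*
 * Note: when running at EL2 under TF-A, PORSR1 is obtained through the
 * SIP_SVC_RCW SMC above; at EL3 (or if the SMC returns nothing) it is read
 * directly from the DCFG/GUTS block.
 */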
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */

u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
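
/*
 * The helpers below walk the GUTS Topology (TP) cluster registers: each
 * cluster word holds up to TP_INIT_PER_CLUSTER initiator entries, and the
 * walk stops at the cluster tagged with TP_CLUSTER_EOC. As a sketch of the
 * arithmetic (hypothetical values): with TP_INIT_PER_CLUSTER = 4 and two
 * clusters of four ARM cores each, cpu_pos_mask() would return 0xff.
 */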
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}

	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/*
	 * Some DPMACs in Armv8-based Freescale Layerscape SoCs can be
	 * configured via both the SerDes (SGMII, XFI, XLAUI etc.) bits and
	 * the EC*_PMUX (RGMII) bits in the RCW. For example, dpmac 17 and 18
	 * in LX2160A can be configured as SGMII from the SerDes bits and as
	 * RGMII via EC1_PMUX/EC2_PMUX. If a dpmac is enabled by the SerDes
	 * bits, that takes precedence over EC*_PMUX: selecting a SerDes
	 * protocol that configures dpmac17 as SGMII while setting EC1_PMUX
	 * to RGMII leaves the dpmac as SGMII, not RGMII.
	 *
	 * Therefore fsl_rgmii_init() runs after fsl_serdes_init(); the SoC's
	 * fsl_rgmii_init() checks whether the dpmac has already been enabled
	 * by fsl_serdes_init() and, if so, does not enable it again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif

	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's timebase
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	/* Set the software reset request bit */
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

/*
 * Register the reset control register as runtime MMIO so that
 * efi_reset_system() can still reach it after ExitBootServices().
 */
efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate the memory reserved at the top of the given memory bank.
 * Returns the aligned reserved size on success,
 * or (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
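
/*
 * Worked example (hypothetical numbers): with a 2 GiB bank, an MC block size
 * of 512 MiB from mc_get_dram_block_size(), and CONFIG_SYS_MC_RSV_MEM_ALIGN
 * of 512 MiB, ram_top becomes 0x6000_0000 and the function returns 512 MiB,
 * i.e. the MC reservation sits at the top of the bank.
 */
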
phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is the 2GB space at 0x8000_0000. Secure memory needs
	 * to be allocated from the first region. If the memory extends to
	 * the second region (or the third region if applicable), Management
	 * Complex (MC) memory should be put into the highest region, i.e.
	 * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
	 * the first region so U-Boot doesn't relocate itself into a higher
	 * address. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, gd->arch.secure_ram should
	 * be checked to avoid running it repeatedly.
	 */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();