 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static void pmb_unmap_entry(struct pmb_entry *);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
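
/*
 * Each PMB entry is programmed through a pair of memory-mapped registers;
 * the helpers below build the address-array and data-array register
 * addresses for a given entry index.
 */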
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
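
/*
 * pmb_alloc_entry() scans the allocation bitmap for a free PMB slot and
 * marks it in use. Callers hold pmb_rwlock for writing (see pmb_alloc()),
 * which is why the non-atomic __set_bit() is sufficient here.
 */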
static int pmb_alloc_entry(void)
	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
	struct pmb_entry *pmbe;
	unsigned long irqflags;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {

	if (__test_and_set_bit(entry, pmb_map)) {
		ret = ERR_PTR(-ENOSPC);

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	spin_lock_init(&pmbe->lock);

	write_unlock_irqrestore(&pmb_rwlock, irqflags);
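
/*
 * pmb_free() returns an entry's slot to the allocation bitmap and marks
 * the software copy unused; invalidation of the hardware entry itself is
 * done separately through __clear_pmb_entry().
 */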
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);
	pmbe->entry = PMB_NO_ENTRY;
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	pmbe->flags &= ~PMB_CACHE_MASK;
	pmbe->flags |= pmb_cache_flags();

	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
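
/*
 * Available PMB mapping sizes, largest first; pmb_remap() below walks
 * this table to pick the biggest size that still fits the remaining
 * request.
 */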
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
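
/*
 * pmb_remap() establishes a mapping of the requested size from physical
 * address phys to virtual address vaddr, using as many PMB entries as
 * needed and linking them together for later teardown by pmb_unmap().
 * The return value is the number of bytes actually mapped.
 */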
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;

	flags = pgprot_val(prot);

	pmb_flags = PMB_WT | PMB_UB;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {

		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {

		if (size < pmb_sizes[i].size)

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);

		spin_lock_irqsave(&pmbe->lock, flags);

		__set_pmb_entry(pmbe);

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		spin_lock(&pmbp->lock);
		spin_unlock(&pmbp->lock);

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */

		spin_unlock_irqrestore(&pmbe->lock, flags);

	return wanted - size;

	pmb_unmap_entry(pmbp);
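
/*
 * pmb_unmap() looks up the mapping that starts at the given virtual
 * address and tears down that entry (and any entries linked to it).
 */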
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
		}
	}
	read_unlock(&pmb_rwlock);

	pmb_unmap_entry(pmbe);
}
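
/*
 * pmb_unmap_entry() tears down a mapping: it walks the chain of linked
 * entries starting at the one passed in and invalidates each hardware
 * entry via __clear_pmb_entry(), holding pmb_rwlock for writing.
 */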
static void pmb_unmap_entry(struct pmb_entry *pmbe)
	write_lock_irqsave(&pmb_rwlock, flags);

		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

	write_unlock_irqrestore(&pmb_rwlock, flags);
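
/*
 * pmb_ppn_in_range() reports whether a physical page number taken from
 * a boot-time PMB entry lies within the kernel's view of memory.
 */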
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

static int pmb_synchronize_mappings(void)
	unsigned int applied = 0;
	struct pmb_entry *pmbp = NULL;

	pr_info("PMB: boot mappings:\n");

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();
			__raw_writel(data_val, data);

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly.
			 */
			if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
			    (pmbe->ppn == (pmbp->ppn + pmbp->size)))

			spin_unlock(&pmbp->lock);

		spin_unlock_irqrestore(&pmbe->lock, irqflags);

		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
			(data_val & PMB_C) ? "" : "un");

	return (applied == 0);

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	ret = pmb_synchronize_mappings();
	if (unlikely(ret == 0)) {

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
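
/*
 * __in_29bit_mode() reports whether address extension is disabled, i.e.
 * whether the CPU is still running with the legacy 29-bit physical
 * address map (PASCR.SE clear).
 */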
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
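
/*
 * debugfs interface: dump the current hardware PMB state, one line per
 * entry, showing the virtual/physical page numbers, mapping size and
 * caching attributes.
 */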
static int pmb_seq_show(struct seq_file *file, void *iter)
	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	return 0;
}

postcore_initcall(pmb_debugfs_init);
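
/*
 * PM hooks: when resuming from hibernation the hardware PMB contents
 * are restored from the software copies kept in pmb_entry_list.
 */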
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
	static pm_message_t prev_state;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];

		read_unlock(&pmb_rwlock);

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);