/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
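
/*
 * Illustrative sketch, not part of the original file: the helpers above
 * simply index the PMB address and data arrays in register space. For
 * entry 3, mk_pmb_addr() yields PMB_ADDR | (3 << PMB_E_SHIFT) and
 * mk_pmb_data() yields PMB_DATA | (3 << PMB_E_SHIFT); the concrete base
 * addresses come from the SH-4A register definitions and are assumed
 * here rather than defined in this file.
 */
#if 0   /* example only, never compiled */
static void pmb_register_layout_example(void)
{
        unsigned long addr = mk_pmb_addr(3);    /* address array, slot 3 */
        unsigned long data = mk_pmb_data(3);    /* data array, slot 3 */

        pr_debug("PMB[3]: addr array @ 0x%08lx, data array @ 0x%08lx\n",
                 addr, data);
}
#endif
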
/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}
/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}
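
/*
 * Worked example, not part of the original file: two entries merge only
 * when the second continues the first in both the virtual and physical
 * address spaces and carries identical flags. The addresses below are
 * made up for illustration.
 */
#if 0   /* example only, never compiled */
static void pmb_can_merge_example(void)
{
        struct pmb_entry a = {
                .vpn = 0x80000000, .ppn = 0x40000000,
                .size = SZ_64M, .flags = PMB_C | PMB_SZ_64M,
        };
        struct pmb_entry b = {
                .vpn = 0x84000000, .ppn = 0x44000000,   /* a.vpn/ppn + 64MB */
                .size = SZ_64M, .flags = PMB_C | PMB_SZ_64M,
        };

        WARN_ON(!pmb_can_merge(&a, &b));        /* contiguous, same flags */
}
#endif
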
static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}
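
/*
 * Illustrative note, not part of the original file: only the four sizes
 * listed in pmb_sizes[] can be described by a single PMB entry, so a
 * request that is not one of them (say 128MB + 16MB = 144MB) has to be
 * covered by several entries by the mapping code further down.
 */
#if 0   /* example only, never compiled */
static void pmb_size_example(void)
{
        WARN_ON(!pmb_size_valid(SZ_128M));              /* one entry */
        WARN_ON(pmb_size_valid(SZ_128M + SZ_16M));      /* needs two entries */
        WARN_ON(pmb_size_to_flags(SZ_64M) != PMB_SZ_64M);
}
#endif
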
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }
                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];
        memset(pmbe, 0, sizeof(struct pmb_entry));
        spin_lock_init(&pmbe->lock);

        pmbe->vpn = vpn;
        pmbe->ppn = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}
static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry = PMB_NO_ENTRY;
        pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        /* Set V-bit */
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        spin_unlock_irqrestore(&pmbe->lock, flags);
}
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long pmb_flags;
        int i, mapped;
        unsigned long orig_addr, vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        pmbp = NULL;
        pmb_flags = pgprot_to_pmb_flags(prot);
        mapped = 0;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        orig_addr = vaddr = (unsigned long)area->addr;

        if (!pmb_addr_valid(vaddr, aligned))
                return ERR_PTR(-EFAULT);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                unsigned long flags;

                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                 PMB_NO_ENTRY);
                if (IS_ERR(pmbe)) {
                        pmb_unmap_entry(pmbp, mapped);
                        return (void __iomem *)pmbe;
                }

                spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = pmb_sizes[i].size;

                __set_pmb_entry(pmbe);

                phys += pmbe->size;
                vaddr += pmbe->size;
                size -= pmbe->size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (likely(pmbp)) {
                        spin_lock(&pmbp->lock);
                        pmbp->link = pmbe;
                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;
                mapped++;

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        return (void __iomem *)(offset + (char *)orig_addr);
}
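
/*
 * Worked example, not part of the original file, with made-up numbers:
 * remapping phys = 0x10400000 with size = SZ_16M selects the 16MB entry
 * size, so align_mask = ~(SZ_16M - 1). offset becomes 0x400000, the
 * physical base rounds down to 0x10000000, and last_addr = 0x11400000
 * rounds up so that a 32MB virtual area is reserved. The caller gets
 * back the start of that area plus offset, which is exactly what the
 * return statement above computes.
 */
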
int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}
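
/*
 * Illustrative note, not part of the original file: depth bounds how many
 * linked entries the loop above tears down. A mapping built from a 128MB
 * entry linked to a 64MB entry is released with depth == 2, while
 * pmb_unmap() simply passes NR_PMB_ENTRIES so that an arbitrarily long
 * chain is walked to its end.
 */
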
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}
static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        spin_lock(&pmbp->lock);

                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * set up the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;

                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}
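
/*
 * Illustrative note, not part of the original file: the cache-attribute
 * fixup above means a boot loader entry left cacheable in write-through
 * mode is rewritten to match the kernel configuration, e.g. with
 * CONFIG_CACHE_WRITEBACK the PMB_CACHE_MASK bits are replaced by
 * pmb_cache_flags(), which in that case is just PMB_C.
 */
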
static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}
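
/*
 * Worked example, not part of the original file: four linked 128MB boot
 * mappings covering one contiguous 512MB span are walked with span
 * growing 128MB at a time; span only matches a valid PMB size again at
 * 512MB, so newsize becomes 512MB and depth becomes 3. The head entry is
 * then rewritten with PMB_SZ_512M and the three trailing entries are
 * torn down via __pmb_unmap_entry(head->link, depth).
 */
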
static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * range.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }
        read_unlock(&pmb_rwlock);
}
#endif
void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
}

bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
                         (size == PMB_SZ_64M)  ? " 64MB" :
                         (size == PMB_SZ_128M) ? "128MB" :
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;

                read_lock(&pmb_rwlock);

                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }

                read_unlock(&pmb_rwlock);
        }

        prev_state = state;

        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);