/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

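/*
 * The PMB entry sizes supported by the hardware, largest first; the
 * mapping code below walks this table and picks the biggest size that
 * still fits the region being mapped.
 */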
static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

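/*
 * Each PMB slot is programmed through a pair of memory-mapped array
 * registers: one holding the virtual (address) half and one holding the
 * physical (data) half of the translation.  mk_pmb_entry() computes the
 * slot offset, and mk_pmb_addr()/mk_pmb_data() turn it into the register
 * address within the respective array.
 */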
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

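/*
 * Boot-time entries are only preserved if their physical address falls
 * inside the kernel's own memory; pmb_synchronize() invalidates
 * everything else.
 */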
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}

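/*
 * Two entries can only be merged when they map virtually and physically
 * contiguous regions with identical attribute flags.
 */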
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}

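/*
 * Sanity checks applied before a mapping is established: the size must be
 * one of the supported PMB entry sizes, the virtual range must lie within
 * P1SEG..P3SEG, and only kernel-only (non-user) protections are accepted.
 */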
static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}

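/*
 * Entry slots are handed out from the pmb_map bitmap under pmb_rwlock;
 * pmb_alloc() either claims the specific slot a caller asked for or, with
 * PMB_NO_ENTRY, takes the first free one.
 */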
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        spin_lock_init(&pmbe->lock);

        pmbe->vpn   = vpn;
        pmbe->ppn   = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry = PMB_NO_ENTRY;
        pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        /* Set V-bit */
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        spin_unlock_irqrestore(&pmbe->lock, flags);
}

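/*
 * pmb_bolt_mapping() carves the requested region into as few PMB entries
 * as possible, always trying the largest entry size that still fits the
 * remainder, and links the resulting entries together so the compound
 * mapping can be torn down as a unit.
 *
 * Illustrative only (not taken from this file): a board setup could bolt
 * a 64MB uncached window over a device with something like
 *
 *      pmb_bolt_mapping(0xb0000000, 0x18000000, SZ_64M, PAGE_KERNEL_NOCACHE);
 *
 * where both addresses are made up and simply need to satisfy
 * pmb_addr_valid() and be aligned to the entry size.
 */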
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long pmb_flags;
        unsigned long flags;
        int i, mapped;

        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                 PMB_NO_ENTRY);
                if (IS_ERR(pmbe)) {
                        pmb_unmap_entry(pmbp, mapped);
                        return PTR_ERR(pmbe);
                }

                spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = pmb_sizes[i].size;

                __set_pmb_entry(pmbe);

                phys  += pmbe->size;
                vaddr += pmbe->size;
                size  -= pmbe->size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (pmbp) {
                        spin_lock(&pmbp->lock);
                        pmbp->link = pmbe;
                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;
                mapped++;

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        return 0;
}

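/*
 * pmb_remap_caller() backs large ioremap()-style requests with PMB
 * entries instead of page table entries: the physical range is rounded
 * up to a supported entry size, a virtual area is reserved above the
 * uncached mapping, and the region is then bolted in with
 * pmb_bolt_mapping().
 */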
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        unsigned long orig_addr, vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;
        int i, ret;

        if (!pmb_iomapping_enabled)
                return NULL;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        orig_addr = vaddr = (unsigned long)area->addr;

        ret = pmb_bolt_mapping(vaddr, phys, size, prot);
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        return (void __iomem *)(offset + (char *)orig_addr);
}

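/*
 * pmb_unmap() looks the virtual address up in the software entry list
 * and tears down the entire chain of linked entries established for it.
 */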
int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}

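/*
 * Print the set of boot mappings that survived pmb_synchronize().
 */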
static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        spin_lock(&pmbp->lock);

                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * setup the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;

                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}

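/*
 * pmb_merge() walks a chain of linked entries and, if the combined span
 * is itself a valid PMB entry size, collapses the chain into the head
 * entry and releases the now-redundant tail entries.
 */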
static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}

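/*
 * Feed every compound (linked) mapping to pmb_merge(), skipping entries
 * that already use the largest available size.
 */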
static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
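/*
 * Trim a boot loader provided uncached mapping down to the 16MB the
 * kernel expects, updating both the PMB entry and the uncached_*
 * bookkeeping via uncached_resize().
 */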
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size   = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif /* CONFIG_UNCACHED_MAPPING */

static int __init early_pmb(char *p)
{
        if (strstr(p, "iomap"))
                pmb_iomapping_enabled = 1;

        return 0;
}
early_param("pmb", early_pmb);

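/*
 * "pmb=iomap" on the kernel command line opts in to PMB-backed ioremap()
 * via the early_param() hook above.
 *
 * pmb_init() is the boot-time bring-up path: import whatever the boot
 * loader left in the PMB, try to coalesce adjacent entries, trim the
 * uncached mapping if necessary, and finally flush the TLB so the new
 * translations take effect.
 */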
void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
}

bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

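/*
 * The "pmb" debugfs file (created under sh_debugfs_root, typically
 * /sys/kernel/debug/sh/pmb) dumps the live hardware state of every entry,
 * independent of the kernel's software copy.
 */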
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety vpn ppn size flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);

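/*
 * The hardware PMB contents do not survive hibernation, so on the
 * FREEZE -> ON transition every entry recorded in the software list is
 * written back into the hardware.
 */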
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;

                read_lock(&pmb_rwlock);

                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }

                read_unlock(&pmb_rwlock);
        }

        prev_state = state;

        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);