/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

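/*
 * Reserve the first free slot in the PMB map.  Returns the slot index, or
 * -ENOSPC when all NR_PMB_ENTRIES hardware slots are in use.  The caller is
 * expected to hold pmb_rwlock for writing, as only the non-atomic
 * __set_bit() is used here.
 */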
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

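/*
 * Allocate a software PMB descriptor.  @entry may name a specific hardware
 * slot (0 .. NR_PMB_ENTRIES - 1) or be PMB_NO_ENTRY to take the first free
 * one.  Returns the initialized pmb_entry, or an ERR_PTR() value when the
 * requested slot is already taken or the map is full.  The hardware entry
 * itself is not programmed here; that is done later via __set_pmb_entry().
 */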
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        spin_lock_init(&pmbe->lock);

        pmbe->vpn   = vpn;
        pmbe->ppn   = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry = PMB_NO_ENTRY;
        pmbe->link = NULL;
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
        writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
                        mk_pmb_data(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        spin_unlock_irqrestore(&pmbe->lock, flags);
}

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

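/*
 * Establish a fixed mapping of [phys, phys + size) at vaddr.  The request is
 * carved up by walking pmb_sizes[] largest-first, and the entries consumed
 * along the way are linked together for later tear-down.  Returns the number
 * of bytes actually mapped (any tail smaller than 16MB is left unmapped) or
 * a negative error code.
 */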
long pmb_remap(unsigned long vaddr, unsigned long phys,
               unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long wanted;
        int pmb_flags, i;
        long err;
        u64 flags;

        flags = pgprot_val(prot);

        pmb_flags = PMB_WT | PMB_UB;

        /* Convert typical pgprot value to the PMB equivalent */
        if (flags & _PAGE_CACHABLE) {
                pmb_flags |= PMB_C;

                if ((flags & _PAGE_WT) == 0)
                        pmb_flags &= ~(PMB_WT | PMB_UB);
        }

        pmbp = NULL;
        wanted = size;

again:
        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                unsigned long flags;

                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                 PMB_NO_ENTRY);
                if (IS_ERR(pmbe)) {
                        err = PTR_ERR(pmbe);
                        goto out;
                }

                spin_lock_irqsave(&pmbe->lock, flags);

                __set_pmb_entry(pmbe);

                phys  += pmb_sizes[i].size;
                vaddr += pmb_sizes[i].size;
                size  -= pmb_sizes[i].size;

                pmbe->size = pmb_sizes[i].size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (likely(pmbp)) {
                        spin_lock(&pmbp->lock);
                        pmbp->link = pmbe;
                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        if (size >= SZ_16M)
                goto again;

        return wanted - size;

out:
        pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

        return err;
}

void pmb_unmap(unsigned long addr)
{
        struct pmb_entry *pmbe = NULL;
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == addr)
                                break;
                }
        }

        read_unlock(&pmb_rwlock);

        pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
}

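/*
 * Illustrative usage sketch -- not part of the original file.  The virtual
 * and physical addresses and the use of PAGE_KERNEL below are hypothetical;
 * this only shows how the pmb_remap()/pmb_unmap() pair is intended to be
 * driven by a caller.
 */
#if 0
static int __init pmb_remap_example(void)
{
        /*
         * A 144MB request: pmb_remap() walks pmb_sizes[] largest-first,
         * so this becomes one 128MB entry linked to one 16MB entry.
         */
        long mapped = pmb_remap(0xa0000000UL, 0x40000000UL,
                                SZ_128M + SZ_16M, PAGE_KERNEL);
        if (mapped < 0)
                return mapped;

        /* ... use the mapping ... */

        /* Tears down the whole linked chain starting at the given vaddr. */
        pmb_unmap(0xa0000000UL);
        return 0;
}
#endif
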
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        spin_lock(&pmbp->lock);

                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * setup the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;

                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}

static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}

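/*
 * Worked example (illustrative, not from the original file): four contiguous
 * 16MB boot mappings with identical flags give span values of 32MB, 48MB and
 * 64MB as the chain is walked; only 64MB is a valid PMB size, so newsize
 * becomes 64MB with depth 3, the three tail entries are unmapped, and the
 * head entry is re-programmed as a single 64MB mapping.
 */
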
static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif /* CONFIG_UNCACHED_MAPPING */

void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
        ctrl_barrier();
}

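/*
 * The PMB only matters in 32-bit address extended mode; when the SE (space
 * extension) bit in PASCR is clear, the CPU is still running with the legacy
 * 29-bit physical address map.
 */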
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);

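/*
 * Illustrative note, not part of the original file: assuming debugfs is
 * mounted at /sys/kernel/debug and sh_debugfs_root is the arch "sh"
 * directory, the table produced by pmb_seq_show() can be read with
 * something like:
 *
 *      # cat /sys/kernel/debug/sh/pmb
 *
 * printing one line per hardware slot in the format shown in the
 * "02: V 0x88 0x08 128MB C CB  B" sample comment above.
 */
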
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;

                read_lock(&pmb_rwlock);

                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }

                read_unlock(&pmb_rwlock);
        }

        prev_state = state;

        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);