From a62a047ed02162573e4bece18ecf8bdd66ccd06b Mon Sep 17 00:00:00 2001
From: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Date: Mon, 26 Jan 2009 15:13:40 +0200
Subject: [PATCH] omap iommu: tlb and pagetable primitives
- iotlb_*()     : iommu tlb operations
- iopgtable_*() : iommu pagetable (twl) operations
- iommu_*()     : the other generic operations

and the entry points to register and acquire an iommu object; a brief
usage sketch follows.
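For illustration, a minimal client-side sequence would look roughly
like this (the iommu name "isp" is made up for the example, and error
handling is abbreviated):

	struct iommu *obj;
	struct iotlb_entry e;

	obj = iommu_get("isp");		/* enables the iommu on the first get */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* fill 'e' (da, pa, page size, attributes), then ... */
	iopgtable_store_entry(obj, &e);	/* set h/w pagetable and load tlb */

	iopgtable_clear_entry(obj, e.da);	/* tear down the mapping */
	iommu_put(obj);			/* disables the iommu on the last put */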
Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
---
 arch/arm/plat-omap/include/mach/iommu.h |  157 +++++
 arch/arm/plat-omap/iommu.c              |  953 +++++++++++++++++++++++++++++++
 arch/arm/plat-omap/iopgtable.h          |   72 +++
 3 files changed, 1182 insertions(+), 0 deletions(-)
 create mode 100644 arch/arm/plat-omap/include/mach/iommu.h
 create mode 100644 arch/arm/plat-omap/iommu.c
 create mode 100644 arch/arm/plat-omap/iopgtable.h
diff --git a/arch/arm/plat-omap/include/mach/iommu.h b/arch/arm/plat-omap/include/mach/iommu.h
new file mode 100644
index 0000000..ef04d7a
--- /dev/null
+++ b/arch/arm/plat-omap/include/mach/iommu.h
+/*
+ * omap iommu: main structures
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_IOMMU_H
+#define __MACH_IOMMU_H
+	u32 pgsz, prsvd, valid;
+	u32 endian, elsz, mixed;
+	struct module	*owner;
+	void __iomem	*regbase;
+	unsigned int	refcount;
+	struct mutex	iommu_lock;	/* global for this whole object */
+
+	/*
+	 * Unlike a CPU pgd, iopgd is not switched per task;
+	 * it is shared globally by each iommu.
+	 */
+	spinlock_t	page_table_lock; /* protect iopgd */
+
+	struct list_head	mmap;
+	struct mutex		mmap_lock; /* protect mmap */
+
+	int (*isr)(struct iommu *obj);
+
+	void *ctx; /* iommu context: registers saved area */
+/* architecture specific functions */
+struct iommu_functions {
+	unsigned long	version;
+
+	int (*enable)(struct iommu *obj);
+	void (*disable)(struct iommu *obj);
+	u32 (*fault_isr)(struct iommu *obj, u32 *ra);
+
+	void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr);
+	void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr);
+
+	struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e);
+	int (*cr_valid)(struct cr_regs *cr);
+	u32 (*cr_to_virt)(struct cr_regs *cr);
+	void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
+	ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf);
+
+	u32 (*get_pte_attr)(struct iotlb_entry *e);
+
+	void (*save_ctx)(struct iommu *obj);
+	void (*restore_ctx)(struct iommu *obj);
+	ssize_t (*dump_ctx)(struct iommu *obj, char *buf);
+};
+struct iommu_platform_data {
+	const char *name;
+	const char *clk_name;
+	const int nr_tlb_entries;
+};
+#include <mach/iommu2.h>
+
+/*
+ * utilities for super page (16MB, 1MB, 64KB and 4KB)
+ */
+#define iopgsz_max(bytes)			\
+	(((bytes) >= SZ_16M) ? SZ_16M :		\
+	 ((bytes) >= SZ_1M)  ? SZ_1M  :		\
+	 ((bytes) >= SZ_64K) ? SZ_64K :		\
+	 ((bytes) >= SZ_4K)  ? SZ_4K  : 0)
+
+#define bytes_to_iopgsz(bytes)				\
+	(((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M :	\
+	 ((bytes) == SZ_1M)  ? MMU_CAM_PGSZ_1M  :	\
+	 ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K :	\
+	 ((bytes) == SZ_4K)  ? MMU_CAM_PGSZ_4K  : -1)
+
+#define iopgsz_to_bytes(iopgsz)				\
+	(((iopgsz) == MMU_CAM_PGSZ_16M) ? SZ_16M :	\
+	 ((iopgsz) == MMU_CAM_PGSZ_1M)  ? SZ_1M  :	\
+	 ((iopgsz) == MMU_CAM_PGSZ_64K) ? SZ_64K :	\
+	 ((iopgsz) == MMU_CAM_PGSZ_4K)  ? SZ_4K  : 0)
+
+#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0)
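+/*
+ * e.g. iopgsz_max(SZ_2M) evaluates to SZ_1M, bytes_to_iopgsz(SZ_8K)
+ * yields -1, and iopgsz_ok() accepts only the four supported sizes.
+ */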
+extern u32 iommu_arch_version(void);
+
+extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e);
+extern void flush_iotlb_page(struct iommu *obj, u32 da);
+extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
+extern void flush_iotlb_all(struct iommu *obj);
+
+ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf);
+
+extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
+extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
+
+extern struct iommu *iommu_get(const char *name);
+extern void iommu_put(struct iommu *obj);
+
+extern void iommu_save_ctx(struct iommu *obj);
+extern void iommu_restore_ctx(struct iommu *obj);
+
+extern int install_iommu_arch(const struct iommu_functions *ops);
+extern void uninstall_iommu_arch(const struct iommu_functions *ops);
+
+#endif /* __MACH_IOMMU_H */
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
new file mode 100644
index 0000000..e638883
--- /dev/null
+++ b/arch/arm/plat-omap/iommu.c
+/*
+ * omap iommu: tlb and pagetable primitives
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
+ *		Paul Mundt and Toshihiro Kobayashi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/clock.h>
+#include <mach/iommu.h>
+
+#include "iopgtable.h"
+/* accommodate the difference between omap1 and omap2/3 */
+static const struct iommu_functions *arch_iommu;
+
+static struct platform_driver omap_iommu_driver;
+static struct kmem_cache *iopte_cachep;
+/**
+ * install_iommu_arch() - Install architecture specific iommu functions
+ * @ops:	a pointer to architecture specific iommu functions
+ *
+ * There are several kinds of iommu algorithms (tlb, pagetable) among
+ * the omap series. This interface installs such an iommu algorithm.
+ **/
+int install_iommu_arch(const struct iommu_functions *ops)
+{
+	if (arch_iommu)
+		return -EBUSY;
+
+	arch_iommu = ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(install_iommu_arch);
+/**
+ * uninstall_iommu_arch() - Uninstall architecture specific iommu functions
+ * @ops:	a pointer to architecture specific iommu functions
+ *
+ * This interface uninstalls the iommu algorithm installed previously.
+ **/
+void uninstall_iommu_arch(const struct iommu_functions *ops)
+{
+	if (arch_iommu != ops)
+		pr_err("%s: not your arch\n", __func__);
+
+	arch_iommu = NULL;
+}
+EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
+/**
+ * iommu_save_ctx() - Save registers for pm off-mode support
+ * @obj:	target iommu
+ **/
+void iommu_save_ctx(struct iommu *obj)
+{
+	arch_iommu->save_ctx(obj);
+}
+EXPORT_SYMBOL_GPL(iommu_save_ctx);
+/**
+ * iommu_restore_ctx() - Restore registers for pm off-mode support
+ * @obj:	target iommu
+ **/
+void iommu_restore_ctx(struct iommu *obj)
+{
+	arch_iommu->restore_ctx(obj);
+}
+EXPORT_SYMBOL_GPL(iommu_restore_ctx);
+/**
+ * iommu_arch_version() - Return running iommu arch version
+ **/
+u32 iommu_arch_version(void)
+{
+	return arch_iommu->version;
+}
+EXPORT_SYMBOL_GPL(iommu_arch_version);
+static int iommu_enable(struct iommu *obj)
+{
+	int err;
+
+	if (!obj)
+		return -EINVAL;
+
+	clk_enable(obj->clk);
+
+	err = arch_iommu->enable(obj);
+
+	clk_disable(obj->clk);
+	return err;
+}
+static void iommu_disable(struct iommu *obj)
+{
+	if (!obj)
+		return;
+
+	clk_enable(obj->clk);
+
+	arch_iommu->disable(obj);
+
+	clk_disable(obj->clk);
+}
+static ssize_t iommu_dump_ctx(struct iommu *obj, char *buf)
+{
+	return arch_iommu->dump_ctx(obj, buf);
+}
+static inline void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
+{
+	arch_iommu->cr_to_e(cr, e);
+}
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+	return arch_iommu->cr_valid(cr);
+}
+static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
+					     struct iotlb_entry *e)
+{
+	return arch_iommu->alloc_cr(obj, e);
+}
+static inline u32 iotlb_cr_to_virt(struct cr_regs *cr)
+{
+	return arch_iommu->cr_to_virt(cr);
+}
+static u32 get_iopte_attr(struct iotlb_entry *e)
+{
+	return arch_iommu->get_pte_attr(e);
+}
+static u32 iommu_report_fault(struct iommu *obj, u32 *da)
+{
+	return arch_iommu->fault_isr(obj, da);
+}
+static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
+{
+	u32 val;
+
+	val = iommu_read_reg(obj, MMU_LOCK);
+
+	l->base = MMU_LOCK_BASE(val);
+	l->vict = MMU_LOCK_VICT(val);
+
+	BUG_ON(l->base != 0); /* Currently no preservation is used */
+}
+static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
+{
+	u32 val;
+
+	BUG_ON(l->base != 0); /* Currently no preservation is used */
+
+	val = (l->base << MMU_LOCK_BASE_SHIFT);
+	val |= (l->vict << MMU_LOCK_VICT_SHIFT);
+
+	iommu_write_reg(obj, val, MMU_LOCK);
+}
+static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
+{
+	arch_iommu->tlb_read_cr(obj, cr);
+}
+static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
+{
+	arch_iommu->tlb_load_cr(obj, cr);
+
+	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+	iommu_write_reg(obj, 1, MMU_LD_TLB);
+}
+/**
+ * iotlb_dump_cr() - Dump an iommu tlb entry into buf
+ * @obj:	target iommu
+ * @cr:	contents of cam and ram register
+ * @buf:	output buffer
+ **/
+ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
+{
+	BUG_ON(!cr || !buf);
+
+	return arch_iommu->dump_cr(obj, cr, buf);
+}
+EXPORT_SYMBOL_GPL(iotlb_dump_cr);
+/**
+ * load_iotlb_entry() - Set an iommu tlb entry
+ * @obj:	target iommu
+ * @e:		an iommu tlb entry info
+ **/
+int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
+{
+	int i;
+	int err = 0;
+	struct iotlb_lock l;
+	struct cr_regs *cr;
+
+	if (!obj || !obj->nr_tlb_entries || !e)
+		return -EINVAL;
+
+	clk_enable(obj->clk);
+
+	for (i = 0; i < obj->nr_tlb_entries; i++) {
+		struct cr_regs tmp;
+
+		iotlb_lock_get(obj, &l);
+		l.vict = i;
+		iotlb_lock_set(obj, &l);
+		iotlb_read_cr(obj, &tmp);
+		if (!iotlb_cr_valid(&tmp))
+			break;
+	}
+
+	if (i == obj->nr_tlb_entries) {
+		dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
+		err = -EBUSY;
+		goto out;
+	}
+
+	cr = iotlb_alloc_cr(obj, e);
+	if (IS_ERR(cr)) {
+		clk_disable(obj->clk);
+		return PTR_ERR(cr);
+	}
+
+	iotlb_load_cr(obj, cr);
+	kfree(cr);
+
+	/* increment victim for next tlb load */
+	if (++l.vict == obj->nr_tlb_entries)
+		l.vict = 0;
+	iotlb_lock_set(obj, &l);
+out:
+	clk_disable(obj->clk);
+	return err;
+}
+EXPORT_SYMBOL_GPL(load_iotlb_entry);
+/**
+ * flush_iotlb_page() - Clear an iommu tlb entry
+ * @obj:	target iommu
+ * @da:	iommu device virtual address
+ *
+ * Clear an iommu tlb entry which includes the 'da' address.
+ **/
+void flush_iotlb_page(struct iommu *obj, u32 da)
+{
+	struct iotlb_lock l;
+	int i;
+
+	clk_enable(obj->clk);
+
+	for (i = 0; i < obj->nr_tlb_entries; i++) {
+		struct cr_regs cr;
+		u32 start;
+		size_t bytes;
+
+		iotlb_lock_get(obj, &l);
+		l.vict = i;
+		iotlb_lock_set(obj, &l);
+		iotlb_read_cr(obj, &cr);
+		if (!iotlb_cr_valid(&cr))
+			continue;
+
+		start = iotlb_cr_to_virt(&cr);
+		bytes = iopgsz_to_bytes(cr.cam & 3);
+
+		if ((start <= da) && (da < start + bytes)) {
+			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+				__func__, start, da, bytes);
+
+			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+		}
+	}
+	clk_disable(obj->clk);
+
+	if (i == obj->nr_tlb_entries)
+		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
+}
+EXPORT_SYMBOL_GPL(flush_iotlb_page);
+/**
+ * flush_iotlb_range() - Clear iommu tlb entries
+ * @obj:	target iommu
+ * @start:	iommu device virtual address (start)
+ * @end:	iommu device virtual address (end)
+ *
+ * Clear iommu tlb entries which include the addresses from 'start' to 'end'.
+ **/
+void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
+{
+	u32 da = start;
+
+	while (da < end) {
+		flush_iotlb_page(obj, da);
+		/* FIXME: Optimize for multiple page size */
+		da += IOPTE_SIZE;
+	}
+}
+EXPORT_SYMBOL_GPL(flush_iotlb_range);
+/**
+ * flush_iotlb_all() - Clear all iommu tlb entries
+ * @obj:	target iommu
+ **/
+void flush_iotlb_all(struct iommu *obj)
+{
+	struct iotlb_lock l;
+
+	clk_enable(obj->clk);
+
+	l.base = 0;
+	l.vict = 0;
+	iotlb_lock_set(obj, &l);
+
+	iommu_write_reg(obj, 1, MMU_GFLUSH);
+
+	clk_disable(obj->clk);
+}
+EXPORT_SYMBOL_GPL(flush_iotlb_all);
+/*
+ *	H/W pagetable operations
+ */
+static void flush_iopgd_range(u32 *first, u32 *last)
+{
+	/* FIXME: L2 cache should be taken care of if it exists */
+	do {
+		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
+		    : : "r" (first));
+		first += L1_CACHE_BYTES / sizeof(*first);
+	} while (first <= last);
+}
+static void flush_iopte_range(u32 *first, u32 *last)
+{
+	/* FIXME: L2 cache should be taken care of if it exists */
+	do {
+		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
+		    : : "r" (first));
+		first += L1_CACHE_BYTES / sizeof(*first);
+	} while (first <= last);
+}
+static void iopte_free(u32 *iopte)
+{
+	/* Note: freed iopte's must be clean ready for re-use */
+	kmem_cache_free(iopte_cachep, iopte);
+}
+static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
+{
+	u32 *iopte;
+
+	/* a pte table already exists */
+	if (*iopgd)
+		goto pte_ready;
+
+	/*
+	 * do the allocation outside the page table lock
+	 */
+	spin_unlock(&obj->page_table_lock);
+	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
+	spin_lock(&obj->page_table_lock);
+
+	if (!*iopgd) {
+		if (!iopte)
+			return ERR_PTR(-ENOMEM);
+
+		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
+		flush_iopgd_range(iopgd, iopgd);
+
+		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
+	} else {
+		/* We raced; free the redundant table */
+		iopte_free(iopte);
+	}
+
+pte_ready:
+	iopte = iopte_offset(iopgd, da);
+
+	dev_vdbg(obj->dev,
+		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
+		 __func__, da, iopgd, *iopgd, iopte, *iopte);
+
+	return iopte;
+}
+static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+
+	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
+	flush_iopgd_range(iopgd, iopgd);
+	return 0;
+}
+static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+	int i;
+
+	for (i = 0; i < 16; i++)
+		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
+	flush_iopgd_range(iopgd, iopgd + 15);
+	return 0;
+}
+static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+	u32 *iopte = iopte_alloc(obj, iopgd, da);
+
+	if (IS_ERR(iopte))
+		return PTR_ERR(iopte);
+
+	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
+	flush_iopte_range(iopte, iopte);
+
+	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
+		 __func__, da, pa, iopte, *iopte);
+
+	return 0;
+}
+static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+	u32 *iopte = iopte_alloc(obj, iopgd, da);
+	int i;
+
+	if (IS_ERR(iopte))
+		return PTR_ERR(iopte);
+
+	for (i = 0; i < 16; i++)
+		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
+	flush_iopte_range(iopte, iopte + 15);
+	return 0;
+}
+static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
+{
+	int (*fn)(struct iommu *, u32, u32, u32);
+	u32 prot;
+	int err;
+
+	if (!obj || !e)
+		return -EINVAL;
+
+	switch (e->pgsz) {
+	case MMU_CAM_PGSZ_16M:
+		fn = iopgd_alloc_super;
+		break;
+	case MMU_CAM_PGSZ_1M:
+		fn = iopgd_alloc_section;
+		break;
+	case MMU_CAM_PGSZ_64K:
+		fn = iopte_alloc_large;
+		break;
+	case MMU_CAM_PGSZ_4K:
+		fn = iopte_alloc_page;
+		break;
+	default:
+		fn = NULL;
+		BUG();
+		break;
+	}
+
+	prot = get_iopte_attr(e);
+
+	spin_lock(&obj->page_table_lock);
+	err = fn(obj, e->da, e->pa, prot);
+	spin_unlock(&obj->page_table_lock);
+
+	return err;
+}
+static void dump_tlb_entries(struct iommu *obj)
+{
+	int i;
+	struct iotlb_lock l;
+
+	clk_enable(obj->clk);
+
+	pr_info("%8s %8s\n", "cam:", "ram:");
+	pr_info("-----------------------------------------\n");
+
+	for (i = 0; i < obj->nr_tlb_entries; i++) {
+		struct cr_regs cr;
+		static char buf[4096];
+
+		iotlb_lock_get(obj, &l);
+		l.vict = i;
+		iotlb_lock_set(obj, &l);
+		iotlb_read_cr(obj, &cr);
+		if (!iotlb_cr_valid(&cr))
+			continue;
+
+		memset(buf, 0, 4096);
+		iotlb_dump_cr(obj, &cr, buf);
+	}
+
+	clk_disable(obj->clk);
+}
+
+static inline void dump_tlb_entries(struct iommu *obj) {}
+/**
+ * iopgtable_store_entry() - Make an iommu pte entry
+ * @obj:	target iommu
+ * @e:		an iommu tlb entry info
+ **/
+int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
+{
+	int err;
+
+	flush_iotlb_page(obj, e->da);
+	err = iopgtable_store_entry_core(obj, e);
+	if (!err)
+		load_iotlb_entry(obj, e);
+	return err;
+}
+EXPORT_SYMBOL_GPL(iopgtable_store_entry);
+/**
+ * iopgtable_lookup_entry() - Lookup an iommu pte entry
+ * @obj:	target iommu
+ * @da:	iommu device virtual address
+ * @ppgd:	iommu pgd entry pointer to be returned
+ * @ppte:	iommu pte entry pointer to be returned
+ **/
+void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
+{
+	u32 *iopgd, *iopte = NULL;
+
+	iopgd = iopgd_offset(obj, da);
+	if (!*iopgd)
+		goto out;
+
+	if (*iopgd & IOPGD_TABLE)
+		iopte = iopte_offset(iopgd, da);
+out:
+	*ppgd = iopgd;
+	*ppte = iopte;
+}
+EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
+static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
+{
+	size_t bytes;
+	u32 *iopgd = iopgd_offset(obj, da);
+	int nent = 1;
+
+	if (!*iopgd)
+		return 0;
+
+	if (*iopgd & IOPGD_TABLE) {
+		int i;
+		u32 *iopte = iopte_offset(iopgd, da);
+
+		bytes = IOPTE_SIZE;
+		if (*iopte & IOPTE_LARGE) {
+			nent *= 16;
+			/* rewind to the 1st entry */
+			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
+		}
+		bytes *= nent;
+		memset(iopte, 0, nent * sizeof(*iopte));
+		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
+
+		/*
+		 * do table walk to check if this table is necessary or not
+		 */
+		iopte = iopte_offset(iopgd, 0);
+		for (i = 0; i < PTRS_PER_IOPTE; i++)
+			if (iopte[i])
+				goto out;
+
+		iopte_free(iopte);
+		nent = 1; /* for the next L1 entry */
+	} else {
+		bytes = IOPGD_SIZE;
+		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+			nent *= 16;
+			/* rewind to the 1st entry */
+			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
+		}
+		bytes *= nent;
+	}
+	memset(iopgd, 0, nent * sizeof(*iopgd));
+	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
+out:
+	return bytes;
+}
+/**
+ * iopgtable_clear_entry() - Remove an iommu pte entry
+ * @obj:	target iommu
+ * @da:	iommu device virtual address
+ **/
+size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
+{
+	size_t bytes;
+
+	spin_lock(&obj->page_table_lock);
+
+	bytes = iopgtable_clear_entry_core(obj, da);
+	flush_iotlb_page(obj, da);
+
+	spin_unlock(&obj->page_table_lock);
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
+static void iopgtable_clear_entry_all(struct iommu *obj)
+{
+	int i;
+
+	spin_lock(&obj->page_table_lock);
+
+	for (i = 0; i < PTRS_PER_IOPGD; i++) {
+		u32 da;
+		u32 *iopgd;
+
+		da = i << IOPGD_SHIFT;
+		iopgd = iopgd_offset(obj, da);
+
+		if (!*iopgd)
+			continue;
+
+		if (*iopgd & IOPGD_TABLE)
+			iopte_free(iopte_offset(iopgd, 0));
+
+		*iopgd = 0;
+		flush_iopgd_range(iopgd, iopgd);
+	}
+
+	flush_iotlb_all(obj);
+
+	spin_unlock(&obj->page_table_lock);
+}
+/*
+ *	Device IOMMU generic operations
+ */
+static irqreturn_t iommu_fault_handler(int irq, void *data)
+{
+	u32 stat, da;
+	u32 *iopgd, *iopte;
+	int err = -EIO;
+	struct iommu *obj = data;
+
+	/* Dynamic loading TLB or PTE */
+	if (obj->isr)
+		err = obj->isr(obj);
+
+	if (!err)
+		return IRQ_HANDLED;
+
+	stat = iommu_report_fault(obj, &da);
+	if (!stat)
+		return IRQ_HANDLED;
+
+	iopgd = iopgd_offset(obj, da);
+
+	if (!(*iopgd & IOPGD_TABLE)) {
+		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
+			da, iopgd, *iopgd);
+		return IRQ_NONE;
+	}
+
+	iopte = iopte_offset(iopgd, da);
+
+	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
+		__func__, da, iopgd, *iopgd, iopte, *iopte);
+
+	dump_tlb_entries(obj);
+
+	return IRQ_NONE;
+}
+static int device_match_by_alias(struct device *dev, void *data)
+{
+	struct iommu *obj = to_iommu(dev);
+	const char *name = data;
+
+	pr_debug("%s: %s %s\n", __func__, obj->name, name);
+
+	return strcmp(obj->name, name) == 0;
+}
+/**
+ * iommu_get() - Get iommu handler
+ * @name:	target iommu name
+ **/
+struct iommu *iommu_get(const char *name)
+{
+	int err = -ENOMEM;
+	struct device *dev;
+	struct iommu *obj;
+
+	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+				 device_match_by_alias);
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	obj = to_iommu(dev);
+
+	mutex_lock(&obj->iommu_lock);
+
+	if (obj->refcount++ == 0) {
+		err = iommu_enable(obj);
+		if (err)
+			goto err_enable;
+		flush_iotlb_all(obj);
+	}
+
+	if (!try_module_get(obj->owner))
+		goto err_module;
+
+	mutex_unlock(&obj->iommu_lock);
+
+	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
+	return obj;
+
+err_module:
+	if (obj->refcount == 1)
+		iommu_disable(obj);
+err_enable:
+	obj->refcount--;
+	mutex_unlock(&obj->iommu_lock);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iommu_get);
+/**
+ * iommu_put() - Put back iommu handler
+ * @obj:	target iommu
+ **/
+void iommu_put(struct iommu *obj)
+{
+	if (!obj || IS_ERR(obj))
+		return;
+
+	mutex_lock(&obj->iommu_lock);
+
+	if (--obj->refcount == 0)
+		iommu_disable(obj);
+
+	module_put(obj->owner);
+
+	mutex_unlock(&obj->iommu_lock);
+
+	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
+}
+EXPORT_SYMBOL_GPL(iommu_put);
+/*
+ *	OMAP Device MMU (IOMMU) detection
+ */
+static int __devinit omap_iommu_probe(struct platform_device *pdev)
+{
+	int err = -ENODEV;
+	void *p;
+	int irq;
+	struct iommu *obj;
+	struct resource *res;
+	struct iommu_platform_data *pdata = pdev->dev.platform_data;
+
+	if (pdev->num_resources != 2)
+		return -EINVAL;
+
+	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
+	if (IS_ERR(obj->clk))
+		goto err_clk;
+
+	obj->nr_tlb_entries = pdata->nr_tlb_entries;
+	obj->name = pdata->name;
+	obj->dev = &pdev->dev;
+	obj->ctx = (void *)obj + sizeof(*obj);
+
+	mutex_init(&obj->iommu_lock);
+	mutex_init(&obj->mmap_lock);
+	spin_lock_init(&obj->page_table_lock);
+	INIT_LIST_HEAD(&obj->mmap);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -ENODEV;
+		goto err_mem;
+	}
+	obj->regbase = ioremap(res->start, resource_size(res));
+	if (!obj->regbase) {
+		err = -ENOMEM;
+		goto err_mem;
+	}
+
+	res = request_mem_region(res->start, resource_size(res),
+				 dev_name(&pdev->dev));
+	if (!res) {
+		err = -EIO;
+		goto err_mem;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		err = -ENODEV;
+		goto err_irq;
+	}
+	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
+			  dev_name(&pdev->dev), obj);
+	if (err < 0)
+		goto err_irq;
+	platform_set_drvdata(pdev, obj);
+
+	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
+	if (!p) {
+		err = -ENOMEM;
+		goto err_pgd;
+	}
+	memset(p, 0, IOPGD_TABLE_SIZE);
+	clean_dcache_area(p, IOPGD_TABLE_SIZE);
+	obj->iopgd = p;
+
+	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
+
+	dev_info(&pdev->dev, "%s registered\n", obj->name);
+	return 0;
+
+err_pgd:
+	free_irq(irq, obj);
+err_irq:
+	release_mem_region(res->start, resource_size(res));
+	iounmap(obj->regbase);
+err_mem:
+	clk_put(obj->clk);
+err_clk:
+	kfree(obj);
+	return err;
+}
+static int __devexit omap_iommu_remove(struct platform_device *pdev)
+{
+	int irq;
+	struct resource *res;
+	struct iommu *obj = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	iopgtable_clear_entry_all(obj);
+	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
+
+	irq = platform_get_irq(pdev, 0);
+	free_irq(irq, obj);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	iounmap(obj->regbase);
+
+	clk_put(obj->clk);
+	dev_info(&pdev->dev, "%s removed\n", obj->name);
+	kfree(obj);
+	return 0;
+}
+static struct platform_driver omap_iommu_driver = {
+	.probe	= omap_iommu_probe,
+	.remove	= __devexit_p(omap_iommu_remove),
+	.driver	= {
+		.name	= "omap-iommu",
+	},
+};
+static void iopte_cachep_ctor(void *iopte)
+{
+	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
+}
+static int __init omap_iommu_init(void)
+{
+	struct kmem_cache *p;
+	const unsigned long flags = SLAB_HWCACHE_ALIGN;
+
+	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, 0, flags,
+			      iopte_cachep_ctor);
+	if (!p)
+		return -ENOMEM;
+	iopte_cachep = p;
+
+	return platform_driver_register(&omap_iommu_driver);
+}
+module_init(omap_iommu_init);
+static void __exit omap_iommu_exit(void)
+{
+	kmem_cache_destroy(iopte_cachep);
+
+	platform_driver_unregister(&omap_iommu_driver);
+}
+module_exit(omap_iommu_exit);
+MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
+MODULE_ALIAS("platform:omap-iommu");
+MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/iopgtable.h
new file mode 100644
index 0000000..37dac43
--- /dev/null
+++ b/arch/arm/plat-omap/iopgtable.h
+/*
+ * omap iommu: pagetable definitions
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PLAT_OMAP_IOMMU_H
+#define __PLAT_OMAP_IOMMU_H
+#define IOPGD_SHIFT		20
+#define IOPGD_SIZE		(1 << IOPGD_SHIFT)
+#define IOPGD_MASK		(~(IOPGD_SIZE - 1))
+#define IOSECTION_MASK		IOPGD_MASK
+#define PTRS_PER_IOPGD		(1 << (32 - IOPGD_SHIFT))
+#define IOPGD_TABLE_SIZE	(PTRS_PER_IOPGD * sizeof(u32))
+
+#define IOSUPER_SIZE		(IOPGD_SIZE << 4)
+#define IOSUPER_MASK		(~(IOSUPER_SIZE - 1))
+
+#define IOPTE_SHIFT		12
+#define IOPTE_SIZE		(1 << IOPTE_SHIFT)
+#define IOPTE_MASK		(~(IOPTE_SIZE - 1))
+#define IOPAGE_MASK		IOPTE_MASK
+#define PTRS_PER_IOPTE		(1 << (IOPGD_SHIFT - IOPTE_SHIFT))
+#define IOPTE_TABLE_SIZE	(PTRS_PER_IOPTE * sizeof(u32))
+
+#define IOLARGE_SIZE		(IOPTE_SIZE << 4)
+#define IOLARGE_MASK		(~(IOLARGE_SIZE - 1))
+#define IOPGD_TABLE		(1 << 0)
+#define IOPGD_SECTION		(2 << 0)
+#define IOPGD_SUPER		(1 << 18 | 2 << 0)
+
+#define IOPTE_SMALL		(2 << 0)
+#define IOPTE_LARGE		(1 << 0)
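+/*
+ * Note: these encodings (section/supersection in the first level,
+ * small/large page in the second) mirror the ARM short-descriptor
+ * pagetable format.
+ */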
+#define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
+#define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))
+
+#define iopte_paddr(iopgd)	(*iopgd & ~((1 << 10) - 1))
+#define iopte_vaddr(iopgd)	((u32 *)phys_to_virt(iopte_paddr(iopgd)))
+
+#define iopte_index(da)		(((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
+#define iopte_offset(iopgd, da)	(iopte_vaddr(iopgd) + iopte_index(da))
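+/*
+ * Example two-level walk: for da = 0x12345678, iopgd_index(da) is
+ * 0x123 and iopte_index(da) is 0x45.
+ */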
+static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
+				   u32 flags)
+{
+	memset(e, 0, sizeof(*e));
+
+	e->da		= da;
+	e->pa		= pa;
+	e->valid	= 1;
+	/* FIXME: add OMAP1 support */
+	e->pgsz		= flags & MMU_CAM_PGSZ_MASK;
+	e->endian	= flags & MMU_RAM_ENDIAN_MASK;
+	e->elsz		= flags & MMU_RAM_ELSZ_MASK;
+	e->mixed	= flags & MMU_RAM_MIXED_MASK;
+
+	return iopgsz_to_bytes(e->pgsz);
+}
+#define to_iommu(dev)							\
+	(struct iommu *)platform_get_drvdata(to_platform_device(dev))
+
+#endif /* __PLAT_OMAP_IOMMU_H */