/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

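/*
 * Example (illustrative only): walking the TLB with the iterator above and
 * skipping invalid entries, the way flush_iotlb_page() and
 * __dump_tlb_entries() below do. The caller is assumed to have enabled
 * obj->clk beforehand.
 *
 *	int i;
 *	struct cr_regs cr;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		dev_dbg(obj->dev, "valid entry at victim %d\n", i);
 *	}
 */
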
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct iommu *iommu_dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

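/*
 * Example (illustrative sketch): an architecture backend, e.g. the omap2/3
 * glue, registers its callback table at init time and unregisters it on
 * exit. The omap2_* names below are assumptions, not part of this file.
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= IOMMU_ARCH_VERSION,
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *	};
 *
 *	static int __init omap2_iommu_init(void)
 *	{
 *		return install_iommu_arch(&omap2_iommu_ops);
 *	}
 *
 *	static void __exit omap2_iommu_exit(void)
 *	{
 *		uninstall_iommu_arch(&omap2_iommu_ops);
 *	}
 */
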
/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	clk_enable(obj->clk);
	err = arch_iommu->enable(obj);
	clk_disable(obj->clk);

	return err;
}

static void iommu_disable(struct iommu *obj)
{
	clk_enable(obj->clk);
	arch_iommu->disable(obj);
	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!obj || !e)
		return ERR_PTR(-EINVAL);

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj:	target iommu
 * @start:	iommu device virtual address(start)
 * @end:	iommu device virtual address(end)
 *
 * Clear iommu tlb entries which include the 'start' to 'end' address range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

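/*
 * Example (illustrative): after tearing down the mappings that backed a
 * buffer, a caller can evict any stale TLB entries covering it. The device
 * address and length below are made up.
 *
 *	u32 da = 0x20000000;
 *	size_t len = 0x100000;
 *
 *	flush_iotlb_range(obj, da, da + len);
 */
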
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l = { .base = 0, .vict = 0 };

	clk_enable(obj->clk);
	iotlb_lock_set(obj, &l);
	iommu_write_reg(obj, 1, MMU_GFLUSH);
	clk_disable(obj->clk);
}

/**
 * iommu_set_twl - enable/disable table walking logic
 * @obj:	target iommu
 * @on:		enable/disable
 *
 * Function used to enable/disable TWL. If one wants to work
 * exclusively with locked TLB entries and receive notifications
 * for TLB miss then call this function to disable TWL.
 **/
void iommu_set_twl(struct iommu *obj, bool on)
{
	clk_enable(obj->clk);
	arch_iommu->set_twl(obj, on);
	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);

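/*
 * Example (illustrative): a client that manages the TLB purely through
 * locked entries can switch the table walker off and handle misses in its
 * fault isr (see iommu_set_isr() below):
 *
 *	iommu_set_twl(obj, false);
 */
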
#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);
	bytes = arch_iommu->dump_ctx(obj, buf, bytes);
	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

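/*
 * Example (illustrative, debug configs only): dumping the valid TLB entries
 * into a page-sized buffer, e.g. from a debugfs read handler.
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *	size_t used;
 *
 *	if (buf) {
 *		used = dump_tlb_entries(obj, buf, PAGE_SIZE);
 *		free_page((unsigned long)buf);
 *	}
 */
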
int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* the page table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

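/*
 * Example (illustrative): mapping one 4KiB page for a device. The da/pa
 * values are made up, and any attribute bits beyond the page size are left
 * to whatever the caller's hardware needs.
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, 0x20000000, 0x80000000, MMU_CAM_PGSZ_4K);
 *	if (iopgtable_store_entry(obj, &e))
 *		dev_err(obj->dev, "mapping failed\n");
 */
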
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);
	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);
	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_set_da_range - Set a valid device address range
 * @obj:	target iommu
 * @start:	Start of valid range
 * @end:	End of valid range
 **/
int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
{
	if (!obj)
		return -EFAULT;

	/* PAGE_ALIGN() rounds up, so it cannot serve as an alignment test */
	if (end < start || !IS_ALIGNED(start | end, PAGE_SIZE))
		return -EINVAL;

	obj->da_start = start;
	obj->da_end = end;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_da_range);

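/*
 * Example (illustrative): platform code could narrow the device address
 * window before any mappings are made; both bounds must be page aligned.
 *
 *	err = iommu_set_da_range(obj, 0x10000000, 0x40000000);
 */
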
/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * it wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);

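/*
 * Example (illustrative): an omap3isp-style lookup by name; "isp" is
 * whatever name the platform data registered for that iommu instance.
 *
 *	struct device *dev = omap_find_iommu_device("isp");
 *
 *	if (!dev)
 *		return -ENODEV;
 */
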
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table with which the iommu should be attached
 **/
static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err;
	struct iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner)) {
		err = -ENODEV;
		goto err_module;
	}

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

int iommu_set_isr(const char *name,
		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
			     void *priv),
		  void *isr_priv)
{
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	/* obj->iommu_lock is a spinlock, see omap_iommu_probe() */
	spin_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		spin_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	spin_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);

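/*
 * Example (illustrative sketch): hooking a fault isr before the iommu is
 * attached. Returning 0 from the isr tells iommu_fault_handler() that the
 * fault has been dealt with. The my_* names are hypothetical.
 *
 *	static int my_isr(struct iommu *obj, u32 da, u32 errs, void *priv)
 *	{
 *		dev_warn(obj->dev, "fault 0x%08x at da 0x%08x\n", errs, da);
 *		return 0;
 *	}
 *
 *	err = iommu_set_isr("isp", my_isr, NULL);
 */
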
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;
	u32 flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	size_t ret;

	dev_dbg(dev, "unmapping da 0x%lx size 0x%zx\n", da, bytes);

	ret = iopgtable_clear_entry(oiommu, da);
	if (ret != bytes) {
		dev_err(dev, "entry @ 0x%lx was %zu; not %zu\n", da, ret, bytes);
		return -EINVAL;
	}

	return 0;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};

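/*
 * Example (illustrative sketch): driving the callbacks above through the
 * generic IOMMU API of this era, where iommu_map()/iommu_unmap() take a
 * page order rather than a byte size. Addresses and the prot value are
 * made up.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc();
 *	struct device *dev = omap_find_iommu_device("isp");
 *	int err;
 *
 *	if (!domain || !dev)
 *		return -ENODEV;
 *	err = iommu_attach_device(domain, dev);
 *	if (err)
 *		return err;
 *	err = iommu_map(domain, 0x20000000, 0x80000000, 0, 0);
 *
 *	iommu_unmap(domain, 0x20000000, 0);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
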
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	register_iommu(&omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");