#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
/*
 * Per-bus IOMMU initialization hook, called once when a driver registers
 * its ops for a bus via bus_set_iommu(). Currently a no-op placeholder
 * for future bus-specific setup.
 */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
}
---- void register_iommu(struct iommu_ops *ops)
++++ /**
++++ * bus_set_iommu - set iommu-callbacks for the bus
++++ * @bus: bus.
++++ * @ops: the callbacks provided by the iommu-driver
++++ *
++++ * This function is called by an iommu driver to set the iommu methods
++++ * used for a particular bus. Drivers for devices on that bus can use
++++ * the iommu-api after these ops are registered.
++++ * This special function is needed because IOMMUs are usually devices on
++++ * the bus itself, so the iommu drivers are not initialized when the bus
++++ * is set up. With this function the iommu-driver can set the iommu-ops
++++ * afterwards.
++++ */
++++ int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
---- if (iommu_ops)
---- BUG();
++++ if (bus->iommu_ops != NULL)
++++ return -EBUSY;
+++
- iommu_ops = ops;
++++ bus->iommu_ops = ops;
++++
++++ /* Do IOMMU specific setup for this bus-type */
++++ iommu_bus_init(bus, ops);
+
--- iommu_ops = ops;
++++ return 0;
}
++++ EXPORT_SYMBOL_GPL(bus_set_iommu);
---- bool iommu_found(void)
++++ bool iommu_present(struct bus_type *bus)
{
---- return iommu_ops != NULL;
++++ return bus->iommu_ops != NULL;
}
---- EXPORT_SYMBOL_GPL(iommu_found);
++++ EXPORT_SYMBOL_GPL(iommu_present);
--- struct iommu_domain *iommu_domain_alloc(void)
+++ +/**
+++ + * iommu_set_fault_handler() - set a fault handler for an iommu domain
+++ + * @domain: iommu domain
+++ + * @handler: fault handler
+++ + *
+++ + * This function should be used by IOMMU users which want to be notified
+++ + * whenever an IOMMU fault happens.
+++ + *
+++ + * The fault handler itself should return 0 on success, and an appropriate
+++ + * error code otherwise.
+++ + */
+++ +void iommu_set_fault_handler(struct iommu_domain *domain,
+++ + iommu_fault_handler_t handler)
+++ +{
+++ + BUG_ON(!domain);
+++ +
+++ + domain->handler = handler;
+++ +}
+++ +EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
+++ +
- struct iommu_domain *iommu_domain_alloc(void)
++++ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
struct iommu_domain *domain;
int ret;
clk_disable(obj->clk);
}
-- EXPORT_SYMBOL_GPL(flush_iotlb_all);
--
-- /**
-- * iommu_set_twl - enable/disable table walking logic
-- * @obj: target iommu
-- * @on: enable/disable
-- *
-- * Function used to enable/disable TWL. If one wants to work
-- * exclusively with locked TLB entries and receive notifications
-- * for TLB miss then call this function to disable TWL.
-- */
-- void iommu_set_twl(struct iommu *obj, bool on)
-- {
-- clk_enable(obj->clk);
-- arch_iommu->set_twl(obj, on);
-- clk_disable(obj->clk);
-- }
-- EXPORT_SYMBOL_GPL(iommu_set_twl);
-- --#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
++ ++#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
-- ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
++ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
if (!obj || !buf)
return -EINVAL;
{
u32 da, errs;
u32 *iopgd, *iopte;
-- struct iommu *obj = data;
++ struct omap_iommu *obj = data;
+++ + struct iommu_domain *domain = obj->domain;
if (!obj->refcount)
return IRQ_NONE;
module_put(obj->owner);
-- mutex_unlock(&obj->iommu_lock);
--
-- dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
-- }
-- EXPORT_SYMBOL_GPL(iommu_put);
--
-- int iommu_set_isr(const char *name,
-- int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
-- void *priv),
-- void *isr_priv)
-- {
-- struct device *dev;
-- struct iommu *obj;
++ obj->iopgd = NULL;
-- dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
-- device_match_by_alias);
-- if (!dev)
-- return -ENODEV;
++ spin_unlock(&obj->iommu_lock);
-- obj = to_iommu(dev);
-- mutex_lock(&obj->iommu_lock);
-- if (obj->refcount != 0) {
-- mutex_unlock(&obj->iommu_lock);
-- return -EBUSY;
-- }
-- obj->isr = isr;
-- obj->isr_priv = isr_priv;
-- mutex_unlock(&obj->iommu_lock);
--
-- return 0;
++ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
-- EXPORT_SYMBOL_GPL(iommu_set_isr);
- -int omap_iommu_set_isr(const char *name,
- - int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
- - void *priv),
- - void *isr_priv)
- -{
- - struct device *dev;
- - struct omap_iommu *obj;
- -
- - dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
- - device_match_by_alias);
- - if (!dev)
- - return -ENODEV;
- -
- - obj = to_iommu(dev);
- - spin_lock(&obj->iommu_lock);
- - if (obj->refcount != 0) {
- - spin_unlock(&obj->iommu_lock);
- - return -EBUSY;
- - }
- - obj->isr = isr;
- - obj->isr_priv = isr_priv;
- - spin_unlock(&obj->iommu_lock);
- -
- - return 0;
- -}
- -EXPORT_SYMBOL_GPL(omap_iommu_set_isr);
- -
/*
* OMAP Device MMU(IOMMU) detection
*/
clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
++ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
++ phys_addr_t pa, int order, int prot)
++ {
++ struct omap_iommu_domain *omap_domain = domain->priv;
++ struct omap_iommu *oiommu = omap_domain->iommu_dev;
++ struct device *dev = oiommu->dev;
++ size_t bytes = PAGE_SIZE << order;
++ struct iotlb_entry e;
++ int omap_pgsz;
++ u32 ret, flags;
++
++ /* we only support mapping a single iommu page for now */
++ omap_pgsz = bytes_to_iopgsz(bytes);
++ if (omap_pgsz < 0) {
++ dev_err(dev, "invalid size to map: %d\n", bytes);
++ return -EINVAL;
++ }
++
++ dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
++
++ flags = omap_pgsz | prot;
++
++ iotlb_init_entry(&e, da, pa, flags);
++
++ ret = omap_iopgtable_store_entry(oiommu, &e);
++ if (ret)
++ dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
++
++ return ret;
++ }
++
++ static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
++ int order)
++ {
++ struct omap_iommu_domain *omap_domain = domain->priv;
++ struct omap_iommu *oiommu = omap_domain->iommu_dev;
++ struct device *dev = oiommu->dev;
++ size_t unmap_size;
++
++ dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
++
++ unmap_size = iopgtable_clear_entry(oiommu, da);
++
++ return unmap_size ? get_order(unmap_size) : -EINVAL;
++ }
++
++ static int
++ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
++ {
++ struct omap_iommu_domain *omap_domain = domain->priv;
++ struct omap_iommu *oiommu;
++ int ret = 0;
++
++ spin_lock(&omap_domain->lock);
++
++ /* only a single device is supported per domain for now */
++ if (omap_domain->iommu_dev) {
++ dev_err(dev, "iommu domain is already attached\n");
++ ret = -EBUSY;
++ goto out;
++ }
++
++ /* get a handle to and enable the omap iommu */
++ oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
++ if (IS_ERR(oiommu)) {
++ ret = PTR_ERR(oiommu);
++ dev_err(dev, "can't get omap iommu: %d\n", ret);
++ goto out;
++ }
++
++ omap_domain->iommu_dev = oiommu;
+++ + oiommu->domain = domain;
++
++ out:
++ spin_unlock(&omap_domain->lock);
++ return ret;
++ }
++
++ static void omap_iommu_detach_dev(struct iommu_domain *domain,
++ struct device *dev)
++ {
++ struct omap_iommu_domain *omap_domain = domain->priv;
++ struct omap_iommu *oiommu = to_iommu(dev);
++
++ spin_lock(&omap_domain->lock);
++
++ /* only a single device is supported per domain for now */
++ if (omap_domain->iommu_dev != oiommu) {
++ dev_err(dev, "invalid iommu device\n");
++ goto out;
++ }
++
++ iopgtable_clear_entry_all(oiommu);
++
++ omap_iommu_detach(oiommu);
++
++ omap_domain->iommu_dev = NULL;
++
++ out:
++ spin_unlock(&omap_domain->lock);
++ }
++
++ static int omap_iommu_domain_init(struct iommu_domain *domain)
++ {
++ struct omap_iommu_domain *omap_domain;
++
++ omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
++ if (!omap_domain) {
++ pr_err("kzalloc failed\n");
++ goto out;
++ }
++
++ omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
++ if (!omap_domain->pgtable) {
++ pr_err("kzalloc failed\n");
++ goto fail_nomem;
++ }
++
++ /*
++ * should never fail, but please keep this around to ensure
++ * we keep the hardware happy
++ */
++ BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
++
++ clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
++ spin_lock_init(&omap_domain->lock);
++
++ domain->priv = omap_domain;
++
++ return 0;
++
++ fail_nomem:
++ kfree(omap_domain);
++ out:
++ return -ENOMEM;
++ }
++
++ /* assume device was already detached */
++ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
++ {
++ struct omap_iommu_domain *omap_domain = domain->priv;
++
++ domain->priv = NULL;
++
++ kfree(omap_domain->pgtable);
++ kfree(omap_domain);
++ }
++
++ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
++ unsigned long da)
++ {
++ struct omap_iommu_domain *omap_domain = domain->priv;
++ struct omap_iommu *oiommu = omap_domain->iommu_dev;
++ struct device *dev = oiommu->dev;
++ u32 *pgd, *pte;
++ phys_addr_t ret = 0;
++
++ iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
++
++ if (pte) {
++ if (iopte_is_small(*pte))
++ ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
++ else if (iopte_is_large(*pte))
++ ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
++ else
++ dev_err(dev, "bogus pte 0x%x", *pte);
++ } else {
++ if (iopgd_is_section(*pgd))
++ ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
++ else if (iopgd_is_super(*pgd))
++ ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
++ else
++ dev_err(dev, "bogus pgd 0x%x", *pgd);
++ }
++
++ return ret;
++ }
++
/* No optional IOMMU capabilities are advertised by this driver. */
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}
++
++ static struct iommu_ops omap_iommu_ops = {
++ .domain_init = omap_iommu_domain_init,
++ .domain_destroy = omap_iommu_domain_destroy,
++ .attach_dev = omap_iommu_attach_dev,
++ .detach_dev = omap_iommu_detach_dev,
++ .map = omap_iommu_map,
++ .unmap = omap_iommu_unmap,
++ .iova_to_phys = omap_iommu_iova_to_phys,
++ .domain_has_cap = omap_iommu_domain_has_cap,
++ };
++
static int __init omap_iommu_init(void)
{
struct kmem_cache *p;
return -ENOMEM;
iopte_cachep = p;
-- register_iommu(&omap_iommu_ops);
++++ bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
++
return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);
#define IOMMU_WRITE (2)
#define IOMMU_CACHE (4) /* DMA cache coherency */
struct iommu_ops;
struct bus_type;
struct device;
struct iommu_domain;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

/* Fault handler callback: (domain, faulting device, faulting iova, flags).
 * Should return 0 on success, an appropriate error code otherwise. */
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int);

struct iommu_domain {
	struct iommu_ops *ops;		/* callbacks of the owning iommu driver */
	void *priv;			/* driver-private domain state */
	iommu_fault_handler_t handler;	/* optional user-installed fault handler */
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
unsigned long iova);
extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
--- #else /* CONFIG_IOMMU_API */
+++ +extern void iommu_set_fault_handler(struct iommu_domain *domain,
+++ + iommu_fault_handler_t handler);
+++ +
+++ +/**
+++ + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+++ + * @domain: the iommu domain where the fault has happened
+++ + * @dev: the device where the fault has happened
+++ + * @iova: the faulting address
+++ + * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+++ + *
+++ + * This function should be called by the low-level IOMMU implementations
+++ + * whenever IOMMU faults happen, to allow high-level users, that are
+++ + * interested in such events, to know about them.
+++ + *
+++ + * This event may be useful for several possible use cases:
+++ + * - mere logging of the event
+++ + * - dynamic TLB/PTE loading
+++ + * - if restarting of the faulting device is required
+++ + *
+++ + * Returns 0 on success and an appropriate error code otherwise (if dynamic
+++ + * PTE/TLB loading will one day be supported, implementations will be able
+++ + * to tell whether it succeeded or not according to this return value).
+++ + *
+++ + * Specifically, -ENOSYS is returned if a fault handler isn't installed
+++ + * (though fault handlers can also return -ENOSYS, in case they want to
+++ + * elicit the default behavior of the IOMMU drivers).
+++ + */
+++ +static inline int report_iommu_fault(struct iommu_domain *domain,
+++ + struct device *dev, unsigned long iova, int flags)
+++ +{
+++ + int ret = -ENOSYS;
+
--- static inline void register_iommu(struct iommu_ops *ops)
--- {
+++ + /*
+++ + * if upper layers showed interest and installed a fault handler,
+++ + * invoke it.
+++ + */
+++ + if (domain->handler)
+++ + ret = domain->handler(domain, dev, iova, flags);
+
+++ + return ret;
+}
--- static inline bool iommu_found(void)
+++ #else /* CONFIG_IOMMU_API */
+++
- static inline void register_iommu(struct iommu_ops *ops)
- {
- }
++++ struct iommu_ops {};
+++
- static inline bool iommu_found(void)
++++ static inline bool iommu_present(struct bus_type *bus)
{
return false;
}