Merge branches 'iommu/fixes', 'x86/vt-d', 'x86/amd', 'arm/smmu', 'arm/tegra' and...
[pandora-kernel.git] / drivers / iommu / arm-smmu.c
index a3adde6..9f7e1d3 100644 (file)
@@ -343,6 +343,7 @@ struct arm_smmu_domain {
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
        struct mutex                    init_mutex; /* Protects smmu pointer */
+       struct iommu_domain             domain;
 };
 
 static struct iommu_ops arm_smmu_ops;
@@ -360,6 +361,11 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
        { 0, NULL},
 };
 
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+       return container_of(dom, struct arm_smmu_domain, domain);
+}
+
 static void parse_driver_options(struct arm_smmu_device *smmu)
 {
        int i = 0;
@@ -645,7 +651,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        u32 fsr, far, fsynr, resume;
        unsigned long iova;
        struct iommu_domain *domain = dev;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base;
@@ -730,6 +736,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
        stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
+       if (smmu->version > ARM_SMMU_V1) {
+               /*
+                * CBA2R.
+                * *Must* be initialised before CBAR, thanks to a VMID16
+                * architectural oversight that affected some implementations.
+                */
+#ifdef CONFIG_64BIT
+               reg = CBA2R_RW64_64BIT;
+#else
+               reg = CBA2R_RW64_32BIT;
+#endif
+               writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+       }
+
        /* CBAR */
        reg = cfg->cbar;
        if (smmu->version == ARM_SMMU_V1)
@@ -747,16 +767,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
-       if (smmu->version > ARM_SMMU_V1) {
-               /* CBA2R */
-#ifdef CONFIG_64BIT
-               reg = CBA2R_RW64_64BIT;
-#else
-               reg = CBA2R_RW64_32BIT;
-#endif
-               writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-       }
-
        /* TTBRs */
        if (stage1) {
                reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@@ -836,7 +846,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        enum io_pgtable_fmt fmt;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
        mutex_lock(&smmu_domain->init_mutex);
@@ -958,7 +968,7 @@ out_unlock:
 
 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 {
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *cb_base;
@@ -985,10 +995,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 }
 
-static int arm_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
        struct arm_smmu_domain *smmu_domain;
 
+       if (type != IOMMU_DOMAIN_UNMANAGED)
+               return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
@@ -996,17 +1008,17 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
         */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
-               return -ENOMEM;
+               return NULL;
 
        mutex_init(&smmu_domain->init_mutex);
        spin_lock_init(&smmu_domain->pgtbl_lock);
-       domain->priv = smmu_domain;
-       return 0;
+
+       return &smmu_domain->domain;
 }
 
-static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+static void arm_smmu_domain_free(struct iommu_domain *domain)
 {
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
        /*
         * Free the domain resources. We assume that all devices have
@@ -1143,7 +1155,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
        int ret;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu;
        struct arm_smmu_master_cfg *cfg;
 
@@ -1187,7 +1199,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_master_cfg *cfg;
 
        cfg = find_smmu_master_cfg(dev);
@@ -1203,7 +1215,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 {
        int ret;
        unsigned long flags;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
 
        if (!ops)
@@ -1220,7 +1232,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 {
        size_t ret;
        unsigned long flags;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
 
        if (!ops)
@@ -1235,7 +1247,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                                              dma_addr_t iova)
 {
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
@@ -1281,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 {
        phys_addr_t ret;
        unsigned long flags;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
 
        if (!ops)
@@ -1329,61 +1341,83 @@ static void __arm_smmu_release_pci_iommudata(void *data)
        kfree(data);
 }
 
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
 {
-       struct arm_smmu_device *smmu;
-       struct arm_smmu_master_cfg *cfg;
+       int i, ret;
+       u16 sid;
        struct iommu_group *group;
-       void (*releasefn)(void *) = NULL;
-       int ret;
-
-       smmu = find_smmu_for_device(dev);
-       if (!smmu)
-               return -ENODEV;
+       struct arm_smmu_master_cfg *cfg;
 
-       group = iommu_group_alloc();
-       if (IS_ERR(group)) {
-               dev_err(dev, "Failed to allocate IOMMU group\n");
+       group = iommu_group_get_for_dev(&pdev->dev);
+       if (IS_ERR(group))
                return PTR_ERR(group);
-       }
-
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
 
+       cfg = iommu_group_get_iommudata(group);
+       if (!cfg) {
                cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
                if (!cfg) {
                        ret = -ENOMEM;
                        goto out_put_group;
                }
 
-               cfg->num_streamids = 1;
-               /*
-                * Assume Stream ID == Requester ID for now.
-                * We need a way to describe the ID mappings in FDT.
-                */
-               pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
-                                      &cfg->streamids[0]);
-               releasefn = __arm_smmu_release_pci_iommudata;
-       } else {
-               struct arm_smmu_master *master;
-
-               master = find_smmu_master(smmu, dev->of_node);
-               if (!master) {
-                       ret = -ENODEV;
-                       goto out_put_group;
-               }
+               iommu_group_set_iommudata(group, cfg,
+                                         __arm_smmu_release_pci_iommudata);
+       }
 
-               cfg = &master->cfg;
+       if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+               ret = -ENOSPC;
+               goto out_put_group;
        }
 
-       iommu_group_set_iommudata(group, cfg, releasefn);
-       ret = iommu_group_add_device(group, dev);
+       /*
+        * Assume Stream ID == Requester ID for now.
+        * We need a way to describe the ID mappings in FDT.
+        */
+       pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+       for (i = 0; i < cfg->num_streamids; ++i)
+               if (cfg->streamids[i] == sid)
+                       break;
+
+       /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+       if (i == cfg->num_streamids)
+               cfg->streamids[cfg->num_streamids++] = sid;
 
+       return 0;
 out_put_group:
        iommu_group_put(group);
        return ret;
 }
 
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+       struct iommu_group *group;
+       struct arm_smmu_master *master;
+       struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+       if (!smmu)
+               return -ENODEV;
+
+       master = find_smmu_master(smmu, dev->of_node);
+       if (!master)
+               return -ENODEV;
+
+       /* No automatic group creation for platform devices */
+       group = iommu_group_alloc();
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       iommu_group_set_iommudata(group, &master->cfg, NULL);
+       return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+       if (dev_is_pci(dev))
+               return arm_smmu_add_pci_device(to_pci_dev(dev));
+
+       return arm_smmu_add_platform_device(dev);
+}
+
 static void arm_smmu_remove_device(struct device *dev)
 {
        iommu_group_remove_device(dev);
@@ -1392,7 +1426,7 @@ static void arm_smmu_remove_device(struct device *dev)
 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr, void *data)
 {
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
        switch (attr) {
        case DOMAIN_ATTR_NESTING:
@@ -1407,7 +1441,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr, void *data)
 {
        int ret = 0;
-       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
        mutex_lock(&smmu_domain->init_mutex);
 
@@ -1435,8 +1469,8 @@ out_unlock:
 
 static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
-       .domain_init            = arm_smmu_domain_init,
-       .domain_destroy         = arm_smmu_domain_destroy,
+       .domain_alloc           = arm_smmu_domain_alloc,
+       .domain_free            = arm_smmu_domain_free,
        .attach_dev             = arm_smmu_attach_dev,
        .detach_dev             = arm_smmu_detach_dev,
        .map                    = arm_smmu_map,
@@ -1633,6 +1667,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
        smmu->pa_size = size;
 
+       /*
+        * What the page table walker can address actually depends on which
+        * descriptor format is in use, but since a) we don't know that yet,
+        * and b) it can vary per context bank, this will have to do...
+        */
+       if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+               dev_warn(smmu->dev,
+                        "failed to set DMA mask for table walker\n");
+
        if (smmu->version == ARM_SMMU_V1) {
                smmu->va_size = smmu->ipa_size;
                size = SZ_4K | SZ_2M | SZ_1G;