X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Farm%2Fmach-msm%2Fiommu.c;h=e2d58e4cb0d73c47ecb11f62d834114d5e186d41;hb=e3166331a3288dd7184548896a1c7ab682f0dbe8;hp=f71747db3beed3fccb5db1bb47559f950d5d6a00;hpb=2301b65b86df8b80e6779ce9885ad62a5c4adc38;p=pandora-kernel.git diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c index f71747db3bee..e2d58e4cb0d7 100644 --- a/arch/arm/mach-msm/iommu.c +++ b/arch/arm/mach-msm/iommu.c @@ -33,6 +33,16 @@ #include #include +#define MRC(reg, processor, op1, crn, crm, op2) \ +__asm__ __volatile__ ( \ +" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ +: "=r" (reg)) + +#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) +#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) + +static int msm_iommu_tex_class[4]; + DEFINE_SPINLOCK(msm_iommu_lock); struct msm_priv { @@ -40,23 +50,26 @@ struct msm_priv { struct list_head list_attached; }; -static void __flush_iotlb(struct iommu_domain *domain) +static int __flush_iotlb(struct iommu_domain *domain) { struct msm_priv *priv = domain->priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; - + int ret = 0; #ifndef CONFIG_IOMMU_PGTABLES_L2 unsigned long *fl_table = priv->pgtable; int i; - dmac_flush_range(fl_table, fl_table + SZ_16K); + if (!list_empty(&priv->list_attached)) { + dmac_flush_range(fl_table, fl_table + SZ_16K); - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { - void *sl_table = __va(fl_table[i] & FL_BASE_MASK); - dmac_flush_range(sl_table, sl_table + SZ_4K); - } + for (i = 0; i < NUM_FL_PTE; i++) + if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { + void *sl_table = __va(fl_table[i] & + FL_BASE_MASK); + dmac_flush_range(sl_table, sl_table + SZ_4K); + } + } #endif list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { @@ -66,6 +79,8 @@ static void __flush_iotlb(struct iommu_domain *domain) iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); } + + return ret; } static void __reset_context(void __iomem *base, int ctx) @@ -95,6 +110,7 @@ static void __reset_context(void __iomem *base, int ctx) static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) { + unsigned int prrr, nmrr; __reset_context(base, ctx); /* Set up HTW mode */ @@ -127,11 +143,11 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) /* Turn on TEX Remap */ SET_TRE(base, ctx, 1); - /* Do not configure PRRR / NMRR on the IOMMU for now. 
We will assume - * TEX class 0 for everything until attributes are properly worked out - */ - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); + /* Set TEX remap attributes */ + RCP15_PRRR(prrr); + RCP15_NMRR(nmrr); + SET_PRRR(base, ctx, prrr); + SET_NMRR(base, ctx, nmrr); /* Turn on BFB prefetch */ SET_BFBDFE(base, ctx, 1); @@ -238,6 +254,11 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) goto fail; } + if (!list_empty(&ctx_drvdata->attached_elm)) { + ret = -EBUSY; + goto fail; + } + list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) if (tmp_drvdata == ctx_drvdata) { ret = -EBUSY; @@ -248,7 +269,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) __pa(priv->pgtable)); list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); - __flush_iotlb(domain); + ret = __flush_iotlb(domain); fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); @@ -263,6 +284,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain, struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; unsigned long flags; + int ret; spin_lock_irqsave(&msm_iommu_lock, flags); priv = domain->priv; @@ -277,7 +299,10 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain, if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) goto fail; - __flush_iotlb(domain); + ret = __flush_iotlb(domain); + if (ret) + goto fail; + __reset_context(iommu_drvdata->base, ctx_dev->num); list_del_init(&ctx_drvdata->attached_elm); @@ -296,12 +321,21 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, unsigned long *sl_table; unsigned long *sl_pte; unsigned long sl_offset; + unsigned int pgprot; size_t len = 0x1000UL << order; - int ret = 0; + int ret = 0, tex, sh; spin_lock_irqsave(&msm_iommu_lock, flags); - priv = domain->priv; + sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; + tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; + + if (tex < 0 || tex > NUM_TEX_CLASS - 1) { + ret = -EINVAL; + goto fail; + } + + priv = domain->priv; if (!priv) { ret = -EINVAL; goto fail; @@ -322,6 +356,18 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, goto fail; } + if (len == SZ_16M || len == SZ_1M) { + pgprot = sh ? FL_SHARED : 0; + pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; + pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; + pgprot |= tex & 0x04 ? FL_TEX0 : 0; + } else { + pgprot = sh ? SL_SHARED : 0; + pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; + pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; + pgprot |= tex & 0x04 ? 
SL_TEX0 : 0; + } + fl_offset = FL_OFFSET(va); /* Upper 12 bits */ fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ @@ -330,17 +376,17 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, for (i = 0; i < 16; i++) *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | - FL_SHARED; + FL_SHARED | pgprot; } if (len == SZ_1M) *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | - FL_TYPE_SECT | FL_SHARED; + FL_TYPE_SECT | FL_SHARED | pgprot; /* Need a 2nd level table */ if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { unsigned long *sl; - sl = (unsigned long *) __get_free_pages(GFP_KERNEL, + sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, get_order(SZ_4K)); if (!sl) { @@ -360,17 +406,17 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, if (len == SZ_4K) *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | - SL_SHARED | SL_TYPE_SMALL; + SL_SHARED | SL_TYPE_SMALL | pgprot; if (len == SZ_64K) { int i; for (i = 0; i < 16; i++) *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | - SL_AP1 | SL_SHARED | SL_TYPE_LARGE; + SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; } - __flush_iotlb(domain); + ret = __flush_iotlb(domain); fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; @@ -455,7 +501,7 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, } } - __flush_iotlb(domain); + ret = __flush_iotlb(domain); fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; @@ -490,9 +536,6 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, SET_CTX_TLBIALL(base, ctx, 0); SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT); - if (GET_FAULT(base, ctx)) - goto fail; - par = GET_PAR(base, ctx); /* We are dealing with a supersection */ @@ -501,6 +544,9 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, else /* Upper 20 bits from PAR, lower 12 from VA */ ret = (par & 0xFFFFF000) | (va & 0x00000FFF); + if (GET_FAULT(base, ctx)) + ret = 0; + fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; @@ -543,8 +589,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) { struct msm_iommu_drvdata *drvdata = dev_id; void __iomem *base; - unsigned int fsr = 0; - int ncb = 0, i = 0; + unsigned int fsr; + int ncb, i; spin_lock(&msm_iommu_lock); @@ -555,7 +601,6 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) base = drvdata->base; - pr_err("===== WOAH! 
=====\n"); pr_err("Unexpected IOMMU page fault!\n"); pr_err("base = %08x\n", (unsigned int) base); @@ -585,8 +630,47 @@ static struct iommu_ops msm_iommu_ops = { .domain_has_cap = msm_iommu_domain_has_cap }; -static int msm_iommu_init(void) +static int __init get_tex_class(int icp, int ocp, int mt, int nos) +{ + int i = 0; + unsigned int prrr = 0; + unsigned int nmrr = 0; + int c_icp, c_ocp, c_mt, c_nos; + + RCP15_PRRR(prrr); + RCP15_NMRR(nmrr); + + for (i = 0; i < NUM_TEX_CLASS; i++) { + c_nos = PRRR_NOS(prrr, i); + c_mt = PRRR_MT(prrr, i); + c_icp = NMRR_ICP(nmrr, i); + c_ocp = NMRR_OCP(nmrr, i); + + if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) + return i; + } + + return -ENODEV; +} + +static void __init setup_iommu_tex_classes(void) +{ + msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = + get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); + + msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = + get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); + + msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = + get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); + + msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = + get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); +} + +static int __init msm_iommu_init(void) { + setup_iommu_tex_classes(); register_iommu(&msm_iommu_ops); return 0; }
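
Not part of the patch above: a minimal, hypothetical sketch of how a client driver might ask for a cached, shareable IOMMU mapping using the memory-attribute prot flags that the patched msm_iommu_map() now translates into a TEX class. The function name example_map_cached_page, the ctx_dev argument, and the API signatures used here (no-argument iommu_domain_alloc(), order-based iommu_map()) are assumptions about the kernel generation this patch targets, as is the <mach/iommu.h> header location; only MSM_IOMMU_ATTR_CACHED_WB_WA and MSM_IOMMU_ATTR_SH are taken from the patch itself (see msm_iommu_map() and setup_iommu_tex_classes()).

/*
 * Hypothetical usage sketch -- not part of the patch.  Maps one 4K page
 * with write-back, write-allocate caching and the shareable attribute.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

#include <mach/iommu.h>	/* assumed location of the MSM_IOMMU_ATTR_* flags */

static int example_map_cached_page(struct device *ctx_dev,
				   unsigned long iova, phys_addr_t pa)
{
	struct iommu_domain *domain;
	int ret;

	/* iommu_domain_alloc() took no arguments in this kernel era */
	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, ctx_dev);
	if (ret)
		goto free_domain;

	/*
	 * Order 0 == one 4K page.  The prot bits request write-back,
	 * write-allocate caching plus shareability; the driver looks the
	 * combination up in msm_iommu_tex_class[] to pick a TEX class.
	 */
	ret = iommu_map(domain, iova, pa, 0,
			MSM_IOMMU_ATTR_CACHED_WB_WA | MSM_IOMMU_ATTR_SH);
	if (ret)
		iommu_detach_device(domain, ctx_dev);

free_domain:
	if (ret)
		iommu_domain_free(domain);
	return ret;
}

A related detail visible in the patch: the second-level page-table allocation switches from GFP_KERNEL to GFP_ATOMIC because msm_iommu_map() runs under msm_iommu_lock with interrupts disabled, where a sleeping allocation is not allowed.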