Merge commit 'upstream/master'
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 523eae6..4956be4 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -35,6 +35,7 @@
 #include <linux/nodemask.h>
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/delay.h>         /* ia64_get_itc() */
 #include <asm/io.h>
@@ -460,6 +461,13 @@ get_iovp_order (unsigned long size)
        return order;
 }
 
+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+                                unsigned int bitshiftcnt)
+{
+       return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+               + bitshiftcnt;
+}
+
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
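
The new ptr_to_pide() helper factors out bit-address arithmetic that was previously open-coded at three sites in sba_search_bitmap(): the byte offset of res_ptr within the resource map, times 8 bits per byte (the << 3), plus the bit offset within the current word, gives the PDIR index. A minimal standalone sketch of the same arithmetic (struct layout simplified, names hypothetical):

	#include <assert.h>
	#include <stdio.h>

	/* Stand-in for struct ioc: only the resource bitmap matters here. */
	struct ioc_sketch {
		unsigned char res_map[16];	/* 16 bytes => 128 PDIR entries */
	};

	static unsigned long ptr_to_pide(struct ioc_sketch *ioc,
					 unsigned long *res_ptr,
					 unsigned int bitshiftcnt)
	{
		/* byte offset into the map, times 8 bits/byte, plus bit offset */
		return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
			+ bitshiftcnt;
	}

	int main(void)
	{
		struct ioc_sketch ioc;
		/* second 64-bit word of the map, bit 5: pide = 8*8 + 5 = 69 */
		unsigned long *word = (unsigned long *)&ioc.res_map[8];

		assert(ptr_to_pide(&ioc, word, 5) == 69);
		printf("pide = %lu\n", ptr_to_pide(&ioc, word, 5));
		return 0;
	}
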
@@ -471,15 +479,25 @@ get_iovp_order (unsigned long size)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+                 unsigned long bits_wanted, int use_hint)
 {
        unsigned long *res_ptr;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-       unsigned long flags, pide = ~0UL;
+       unsigned long flags, pide = ~0UL, tpide;
+       unsigned long boundary_size;
+       unsigned long shift;
+       int ret;
 
        ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
        ASSERT(res_ptr < res_end);
 
+       boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+       boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+       BUG_ON(ioc->ibase & ~iovp_mask);
+       shift = ioc->ibase >> iovp_shift;
+
        spin_lock_irqsave(&ioc->res_lock, flags);
 
        /* Allow caller to force a search through the entire resource space */
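
dma_get_seg_boundary() returns an inclusive mask, so adding 1 gives the boundary size in bytes, and the ALIGN()/shift pair converts that to IOVA pages; shift records where the IOVA space begins, in pages, so map indices can be translated to bus-address pages before the boundary test (the BUG_ON guarantees ibase is page-aligned, making that translation exact). A worked sketch of the conversion, assuming iovp_shift = 12 (4 KB IOVA pages) and a 4 GB segment boundary:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		unsigned int iovp_shift = 12;		/* 4 KB IOVA pages */
		uint64_t seg_mask = 0xffffffffULL;	/* segments must not cross 4 GB */

		uint64_t boundary_bytes = seg_mask + 1;	/* 0x100000000 */
		uint64_t boundary_pages =
			ALIGN(boundary_bytes, 1ULL << iovp_shift) >> iovp_shift;

		printf("boundary = %#llx pages\n",	/* prints 0x100000 */
		       (unsigned long long)boundary_pages);
		return 0;
	}
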
@@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
                        if (likely(*res_ptr != ~0UL)) {
                                bitshiftcnt = ffz(*res_ptr);
                                *res_ptr |= (1UL << bitshiftcnt);
-                               pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-                               pide <<= 3;     /* convert to bit address */
-                               pide += bitshiftcnt;
+                               pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                                ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                goto found_it;
                        }
@@ -535,11 +551,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
                        DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        ASSERT(0 != mask);
                        for (; mask ; mask <<= o, bitshiftcnt += o) {
-                               if(0 == ((*res_ptr) & mask)) {
+                               tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+                               ret = iommu_is_span_boundary(tpide, bits_wanted,
+                                                            shift,
+                                                            boundary_size);
+                               if ((0 == ((*res_ptr) & mask)) && !ret) {
                                        *res_ptr |= mask;     /* mark resources busy! */
-                                       pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-                                       pide <<= 3;     /* convert to bit address */
-                                       pide += bitshiftcnt;
+                                       pide = tpide;
                                        ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                        goto found_it;
                                }
@@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
                end = res_end - qwords;
 
                for (; res_ptr < end; res_ptr++) {
+                       tpide = ptr_to_pide(ioc, res_ptr, 0);
+                       ret = iommu_is_span_boundary(tpide, bits_wanted,
+                                                    shift, boundary_size);
+                       if (ret)
+                               goto next_ptr;
                        for (i = 0 ; i < qwords ; i++) {
                                if (res_ptr[i] != 0)
                                        goto next_ptr;
@@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
                                res_ptr[i] = ~0UL;
                        res_ptr[i] |= RESMAP_MASK(bits);
 
-                       pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-                       pide <<= 3;     /* convert to bit address */
+                       pide = tpide;
                        res_ptr += qwords;
                        ioc->res_bitshift = bits;
                        goto found_it;
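
The two multi-page search paths above now reject a candidate pide whose bits_wanted-page run would straddle the device's segment boundary; the single-page ffz path needs no check, since boundary_size is always at least one page. The <linux/iommu-helper.h> test amounts to checking that the run fits inside the power-of-two boundary window containing its start; a hedged restatement of that logic as a standalone predicate:

	#include <stdbool.h>

	/*
	 * Sketch of the span-boundary test: add `shift` (the IOVA base in
	 * pages) to translate the map index into bus-address pages, take the
	 * offset within the current boundary window, and report a crossing
	 * if the run of `nr` pages would spill past the window's end.
	 * boundary_size must be a power of two, as the real helper enforces.
	 */
	static bool span_crosses_boundary(unsigned long index, unsigned long nr,
					  unsigned long shift,
					  unsigned long boundary_size)
	{
		unsigned long offset = (shift + index) & (boundary_size - 1);

		return offset + nr > boundary_size;
	}
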
@@ -605,7 +627,7 @@ found_it:
  * resource bit map.
  */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
        unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
        /*
        ** "seek and ye shall find"...praying never hurts either...
        */
-       pide = sba_search_bitmap(ioc, pages_needed, 1);
+       pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
        if (unlikely(pide >= (ioc->res_size << 3))) {
-               pide = sba_search_bitmap(ioc, pages_needed, 0);
+               pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
                if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
                        unsigned long flags;
@@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
                        }
                        spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-                       pide = sba_search_bitmap(ioc, pages_needed, 0);
+                       pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
                        if (unlikely(pide >= (ioc->res_size << 3)))
                                panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
                                      ioc->ioc_hpa);
@@ -877,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+                    struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        dma_addr_t iovp;
@@ -910,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
                ** Device is bit capable of DMA'ing to the buffer...
                ** just return the PCI address of ptr
                */
-               DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+               DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+                          "0x%lx/0x%lx\n",
                           to_pci_dev(dev)->dma_mask, pci_addr);
                return pci_addr;
        }
@@ -931,12 +956,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+       if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-       pide = sba_alloc_range(ioc, size);
+       pide = sba_alloc_range(ioc, dev, size);
 
        iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -960,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
        /* form complete address */
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check after sba_map_single()");
+       sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
        return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
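
With the _attrs variants exported, callers can thread per-mapping DMA attributes down to the IOMMU; callers with no attributes simply pass NULL, as sba_alloc_coherent() does further down. A hypothetical driver-side call, assuming the struct dma_attrs helpers of this era (DEFINE_DMA_ATTRS/dma_set_attr from <linux/dma-attrs.h>):

	#include <linux/dma-attrs.h>
	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Prototype as exported by sba_iommu.c. */
	extern dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
					       size_t size, int dir,
					       struct dma_attrs *attrs);

	/* Hypothetical snippet: map buf with the write-barrier attribute set. */
	static dma_addr_t map_with_barrier(struct device *dev, void *buf,
					   size_t len)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
		return sba_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
	}
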
@@ -991,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+                           int dir, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1016,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
                /*
                ** Address does not fall w/in IOVA, must be bypassing
                */
-               DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+               DBG_BYPASS("sba_unmap_single_atttrs() bypass addr: 0x%lx\n",
+                          iova);
 
 #ifdef ENABLE_MARK_CLEAN
                if (dir == DMA_FROM_DEVICE) {
@@ -1065,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1122,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
         * If device can't bypass or bypass is disabled, pass the 32bit fake
         * device to map single to get an iova mapping.
         */
-       *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+       *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+                                          size, 0, NULL);
 
        return addr;
 }
@@ -1139,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-       sba_unmap_single(dev, dma_handle, size, 0);
+       sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1373,7 +1403,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
                dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
                ASSERT(dma_len <= DMA_CHUNK_SIZE);
                dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-                       | (sba_alloc_range(ioc, dma_len) << iovp_shift)
+                       | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
                        | dma_offset);
                n_mappings++;
        }
@@ -1388,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+                    int dir, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
@@ -1419,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sglist->dma_length = sglist->length;
-               sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+               sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
                return 1;
        }
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+       if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
-               panic("Check before sba_map_sg()");
+               panic("Check before sba_map_sg_attrs()");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1457,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+       if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
-               panic("Check after sba_map_sg()\n");
+               panic("Check after sba_map_sg_attrs()\n");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1470,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
        return filled;
 }
-
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+                       int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
        struct ioc *ioc;
@@ -1496,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
        ASSERT(ioc);
 
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+       sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
        while (nents && sglist->dma_length) {
 
-               sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+               sba_unmap_single_attrs(dev, sglist->dma_address,
+                                      sglist->dma_length, dir, attrs);
                sglist = sg_next(sglist);
                nents--;
        }
@@ -1511,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+       sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -1896,15 +1932,13 @@ static const struct file_operations ioc_fops = {
 static void __init
 ioc_proc_init(void)
 {
-       struct proc_dir_entry *dir, *entry;
+       struct proc_dir_entry *dir;
 
        dir = proc_mkdir("bus/mckinley", NULL);
        if (!dir)
                return;
 
-       entry = create_proc_entry(ioc_list->name, 0, dir);
-       if (entry)
-               entry->proc_fops = &ioc_fops;
+       proc_create(ioc_list->name, 0, dir, &ioc_fops);
 }
 #endif
 
@@ -2113,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
@@ -2144,10 +2178,6 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);