Merge git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog

diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index da88738..e239bbf 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -72,24 +72,14 @@ enum ehca_mr_pgsize {
 
 static u32 ehca_encode_hwpage_size(u32 pgsize)
 {
-       u32 idx = 0;
-       pgsize >>= 12;
-       /*
-        * map mr page size into hw code:
-        * 0, 1, 2, 3 for 4K, 64K, 1M, 64M
-        */
-       while (!(pgsize & 1)) {
-               idx++;
-               pgsize >>= 4;
-       }
-       return idx;
+       int log = ilog2(pgsize);
+       WARN_ON(log < 12 || log > 24 || log & 3);
+       return (log - 12) / 4;
 }
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-       if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
-               return EHCA_MR_PGSIZE16M;
-       return EHCA_MR_PGSIZE4K;
+       return 1UL << ilog2(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
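
The rewrite above collapses the old shift loop into straight arithmetic: the four
hardware codes 0..3 stand for 4K (2^12), 64K (2^16), 1M (2^20) and 16M (2^24)
pages, so code = (log2(pgsize) - 12) / 4, and the WARN_ON enforces exactly that
domain. (The removed comment's "64M" was off; the upper bound log <= 24
corresponds to 16M, matching EHCA_MR_PGSHIFT16M used later in this file.)
ehca_get_max_hwpage_size() likewise now derives the largest supported size from
the highest bit set in the capability mask instead of testing one flag. A minimal
userspace sketch of the encoding, with a GCC builtin standing in for the kernel's
ilog2():

    #include <assert.h>
    #include <stdint.h>

    /* userspace stand-in for the kernel's ilog2() */
    static int ilog2_u32(uint32_t v) { return 31 - __builtin_clz(v); }

    static uint32_t encode_hwpage_size(uint32_t pgsize)
    {
            int log = ilog2_u32(pgsize);
            /* valid sizes are 2^12 .. 2^24 in steps of 2^4 */
            return (log - 12) / 4;
    }

    int main(void)
    {
            assert(encode_hwpage_size(0x1000)    == 0); /* 4K  */
            assert(encode_hwpage_size(0x10000)   == 1); /* 64K */
            assert(encode_hwpage_size(0x100000)  == 2); /* 1M  */
            assert(encode_hwpage_size(0x1000000) == 3); /* 16M */
            return 0;
    }
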
@@ -259,7 +249,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
                pginfo.u.phy.num_phys_buf = num_phys_buf;
                pginfo.u.phy.phys_buf_array = phys_buf_array;
                pginfo.next_hwpage =
-                       ((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
+                       ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
 
                ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
                                  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
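
This next_hwpage fix (repeated in ehca_rereg_phys_mr() further down) turns the
initial value from an absolute hardware-page number into the hardware-page
offset within the first kernel page: masking with ~PAGE_MASK keeps only the
offset of iova_start inside its kernel page before dividing by hw_pgsize. A
small userspace illustration, assuming 64K kernel pages and 4K hardware pages:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* assumed layout: 64K kernel pages, 4K hardware pages */
            const uint64_t page_size = 0x10000, page_mask = ~(page_size - 1);
            const uint64_t hw_pgsize = 0x1000;
            uint64_t iova_start = 0x10005000ULL;

            uint64_t old_val = (iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
            uint64_t new_val = (iova_start & ~page_mask) / hw_pgsize;

            /* old: 0x10005, an absolute hw page number (wrong as an offset);
             * new: 5, the hw-page index inside the first kernel page
             */
            printf("old=%#llx new=%llu\n", (unsigned long long)old_val,
                   (unsigned long long)new_val);
            return 0;
    }
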
@@ -296,7 +286,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_mr_pginfo pginfo;
-       int ret;
+       int ret, page_shift;
        u32 num_kpages;
        u32 num_hwpages;
        u64 hwpage_size;
@@ -351,19 +341,20 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        /* determine number of MR pages */
        num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
        /* select proper hw_pgsize */
-       if (ehca_mr_largepage &&
-           (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
-               int page_shift = PAGE_SHIFT;
-               if (e_mr->umem->hugetlb) {
-                       /* determine page_shift, clamp between 4K and 16M */
-                       page_shift = (fls64(length - 1) + 3) & ~3;
-                       page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
-                                        EHCA_MR_PGSHIFT16M);
-               }
-               hwpage_size = 1UL << page_shift;
-       } else
-               hwpage_size = EHCA_MR_PGSIZE4K; /* ehca1 only supports 4k */
-       ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);
+       page_shift = PAGE_SHIFT;
+       if (e_mr->umem->hugetlb) {
+               /* determine page_shift, clamp between 4K and 16M */
+               page_shift = (fls64(length - 1) + 3) & ~3;
+               page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
+                                EHCA_MR_PGSHIFT16M);
+       }
+       hwpage_size = 1UL << page_shift;
+
+       /* now that we have the desired page size, shift until it's
+        * supported, too. 4K is always supported, so this terminates.
+        */
+       while (!(hwpage_size & shca->hca_cap_mr_pgsize))
+               hwpage_size >>= 4;
 
 reg_user_mr_fallback:
        num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
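
The selection logic above now always computes a desired shift first (for
hugetlb regions, the MR length's log2 rounded up to a multiple of 4 and clamped
to the 4K..16M range) and then degrades by factors of 16 until the capability
mask admits the size; since 4K is always supported, the loop terminates. A
self-contained sketch of that flow, with a userspace stand-in for fls64() and
the EHCA_MR_PGSHIFT* constants assumed to be 12 and 24:

    #include <assert.h>
    #include <stdint.h>

    #define EHCA_MR_PGSHIFT4K   12
    #define EHCA_MR_PGSHIFT16M  24

    /* userspace stand-in for the kernel's fls64() */
    static int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

    /* hypothetical helper mirroring the hunk above */
    static uint64_t pick_hwpage_size(uint64_t length, int hugetlb,
                                     int page_shift, uint64_t cap_mask)
    {
            uint64_t hwpage_size;

            if (hugetlb) {
                    /* round log2(length) up to a multiple of 4, then
                     * clamp between 4K and 16M, as min(max(...)) does */
                    page_shift = (fls64_(length - 1) + 3) & ~3;
                    if (page_shift < EHCA_MR_PGSHIFT4K)
                            page_shift = EHCA_MR_PGSHIFT4K;
                    if (page_shift > EHCA_MR_PGSHIFT16M)
                            page_shift = EHCA_MR_PGSHIFT16M;
            }
            hwpage_size = 1ULL << page_shift;

            /* shift down until supported; 4K is always supported */
            while (!(hwpage_size & cap_mask))
                    hwpage_size >>= 4;
            return hwpage_size;
    }

    int main(void)
    {
            /* cap mask with only 4K and 64K: a 1M request falls back to 64K */
            uint64_t cap = 0x1000 | 0x10000;
            assert(pick_hwpage_size(1 << 20, 1, 12, cap) == 0x10000);
            return 0;
    }
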
@@ -547,7 +538,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
                pginfo.u.phy.num_phys_buf = num_phys_buf;
                pginfo.u.phy.phys_buf_array = phys_buf_array;
                pginfo.next_hwpage =
-                       ((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
+                       ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
        }
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                new_acl = mr_access_flags;
@@ -809,8 +800,9 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
-       hw_pgsize = ehca_get_max_hwpage_size(shca);
-       if ((1 << fmr_attr->page_shift) != hw_pgsize) {
+
+       hw_pgsize = 1 << fmr_attr->page_shift;
+       if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
                ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
@@ -826,6 +818,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 
        /* register MR on HCA */
        memset(&pginfo, 0, sizeof(pginfo));
+       pginfo.hwpage_size = hw_pgsize;
        /*
         * pginfo.num_hwpages==0, ie register_rpages() will not be called
         * but deferred to map_phys_fmr()
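
These two FMR hunks relax the old equality test: instead of requiring
fmr_attr->page_shift to match the single largest supported size, any shift
whose size bit is set in hca_cap_mr_pgsize is accepted, and the chosen size is
recorded in pginfo.hwpage_size so the subsequent mapping uses it. A tiny sketch
of the acceptance test (the capability mask here is an assumed example value):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed cap mask: 4K, 64K, 1M and 16M all supported */
    #define CAP_ALL (0x1000ULL | 0x10000ULL | 0x100000ULL | 0x1000000ULL)

    static int fmr_page_shift_ok(int page_shift, uint64_t cap)
    {
            uint64_t hw_pgsize = 1ULL << page_shift;
            return (hw_pgsize & cap) != 0;
    }

    int main(void)
    {
            printf("shift 12 -> %d\n", fmr_page_shift_ok(12, CAP_ALL)); /* 4K: ok */
            printf("shift 13 -> %d\n", fmr_page_shift_ok(13, CAP_ALL)); /* 8K: rejected */
            printf("shift 16 -> %d\n", fmr_page_shift_ok(16, CAP_ALL)); /* 64K: ok */
            return 0;
    }
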
@@ -1776,7 +1769,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
        list_for_each_entry_continue(
                chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
                for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
-                       pgaddr = page_to_pfn(chunk->page_list[i].page)
+                       pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
                                << PAGE_SHIFT ;
                        *kpage = phys_to_abs(pgaddr +
                                             (pginfo->next_hwpage *
@@ -1832,7 +1825,7 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
 {
        int t;
        for (t = start_idx; t <= end_idx; t++) {
-               u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT;
+               u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
                ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
                             *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
                if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
@@ -1867,7 +1860,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
                for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
                        if (nr_kpages == kpages_per_hwpage) {
-                               pgaddr = ( page_to_pfn(chunk->page_list[i].page)
+                               pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
                                           << PAGE_SHIFT );
                                *kpage = phys_to_abs(pgaddr);
                                if ( !(*kpage) ) {
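
The last three hunks are mechanical fallout from the scatterlist rework:
struct scatterlist no longer exposes a directly usable .page field (the low
pointer bits now carry the chain and end markers), so the sg_page() accessor
must be used. An illustrative kernel-style fragment of the same access
pattern, not taken from the patch:

    #include <linux/mm.h>
    #include <linux/printk.h>
    #include <linux/scatterlist.h>

    /* walk a scatterlist with the accessor instead of the removed field */
    static void dump_sg_pfns(struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, nents, i) {
                    u64 pgaddr = (u64)page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
                    pr_debug("sg %d: phys base %llx len %u\n", i,
                             (unsigned long long)pgaddr, sg->length);
            }
    }
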