diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6912bbf..eab8c42 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
-               clear_user_highpage(page + i, addr);
+               clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
 }
 
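The clear_huge_page() fix above matters on architectures with virtually indexed, aliasing data caches: clear_user_highpage() uses the faulting user virtual address to clear the page through the correct cache colour (or to flush the alias afterwards), so each 4K subpage of the huge page must be passed its own address, not the huge page's base address. For reference, the generic helper at this point in the tree looks roughly like the sketch below (simplified from include/linux/highmem.h; the arch-specific aliasing work hides inside clear_user_page()):

static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page, KM_USER0);

        /* vaddr lets aliasing-cache architectures pick the right colour */
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr, KM_USER0);
}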
@@ -71,24 +71,24 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 {
        int nid;
        struct page *page = NULL;
+       struct mempolicy *mpol;
        struct zonelist *zonelist = huge_zonelist(vma, address,
-                                               htlb_alloc_mask);
+                                       htlb_alloc_mask, &mpol);
        struct zone **z;
 
        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
                if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
-                   !list_empty(&hugepage_freelists[nid]))
+                   !list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
                        break;
+               }
        }
-
-       if (*z) {
-               page = list_entry(hugepage_freelists[nid].next,
-                                 struct page, lru);
-               list_del(&page->lru);
-               free_huge_pages--;
-               free_huge_pages_node[nid]--;
-       }
+       mpol_free(mpol);        /* unref if mpol !NULL */
        return page;
 }
 
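The dequeue_huge_page() hunk above is about mempolicy reference counting: huge_zonelist() now also hands back, through the new &mpol argument, the mempolicy that owns the zonelist it returned (the MPOL_BIND case) or NULL otherwise, and the caller must drop that reference with mpol_free() once it has finished walking the zones. Folding the dequeue into the loop lets every path fall through to the single mpol_free() call. A usage sketch of the new convention, assuming that reading of the interface (the helper name below is hypothetical):

static struct zone *first_allowed_zone(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        struct mempolicy *mpol;
        struct zonelist *zl = huge_zonelist(vma, addr, htlb_alloc_mask, &mpol);
        struct zone **z;

        for (z = zl->zones; *z; z++)
                if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask))
                        break;

        mpol_free(mpol);        /* drop the BIND policy ref; no-op for NULL */
        return *z;
}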
@@ -107,15 +107,19 @@ static int alloc_fresh_huge_page(void)
 {
        static int prev_nid;
        struct page *page;
-       static DEFINE_SPINLOCK(nid_lock);
        int nid;
 
-       spin_lock(&nid_lock);
+       /*
+        * Copy static prev_nid to local nid, work on that, then copy it
+        * back to prev_nid afterwards: otherwise there's a window in which
+        * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
+        * But we don't need to use a spin_lock here: it really doesn't
+        * matter if occasionally a racer chooses the same nid as we do.
+        */
        nid = next_node(prev_nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        prev_nid = nid;
-       spin_unlock(&nid_lock);
 
        page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
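Dropping nid_lock above relies on the race being benign: the round-robin is computed on a local nid, and only an already-wrapped, in-range value is ever written back to the static prev_nid, so a concurrent caller can at worst pick the same node, never the out-of-range MAX_NUMNODES that next_node() uses as its end marker. A minimal userspace analogue of that pattern (hypothetical, not kernel code):

#define NR_IDS 4

static int prev_id;                     /* shared; only ever holds 0..NR_IDS-1 */

static int next_round_robin_id(void)
{
        int id = prev_id + 1;           /* work on a local copy */

        if (id >= NR_IDS)               /* wrap before publishing */
                id = 0;
        prev_id = id;                   /* racers see this or a stale valid id */
        return id;
}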
@@ -207,7 +211,7 @@ static void update_and_free_page(struct page *page)
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1<< PG_writeback);
        }
-       page[1].lru.next = NULL;
+       set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
 }
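The update_and_free_page() hunk swaps an open-coded dependency on where the compound-page destructor is stored for the accessor that encapsulates it. At this point in the tree the helpers are roughly the following (from include/linux/mm.h), which is why clearing page[1].lru.next and setting a NULL destructor are equivalent:

typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
                                          compound_page_dtor *dtor)
{
        page[1].lru.next = (void *)dtor;        /* stashed in the first tail page */
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
        return (compound_page_dtor *)page[1].lru.next;
}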
@@ -316,15 +320,14 @@ unsigned long hugetlb_total_pages(void)
  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
  * this far.
  */
-static struct page *hugetlb_nopage(struct vm_area_struct *vma,
-                               unsigned long address, int *unused)
+static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        BUG();
-       return NULL;
+       return 0;
 }
 
 struct vm_operations_struct hugetlb_vm_ops = {
-       .nopage = hugetlb_nopage,
+       .fault = hugetlb_vm_op_fault,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
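The hunk above is this file's share of the tree-wide ->nopage to ->fault conversion: the handler now receives a struct vm_fault describing the fault (pgoff, virtual_address, flags) and reports its outcome as VM_FAULT_* bits in the return value instead of returning a page, with plain 0 standing in for the old VM_FAULT_MINOR "handled" result; that is why the VM_FAULT_MINOR returns in the hunks below collapse to 0. Because do_page_fault() sends hugetlb faults to hugetlb_fault() directly, this handler should be unreachable and simply BUG()s. A sketch of an ordinary handler under the new API (example_pages and EXAMPLE_NR_PAGES are hypothetical):

static struct page *example_pages[EXAMPLE_NR_PAGES];   /* populated elsewhere */

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        if (vmf->pgoff >= EXAMPLE_NR_PAGES)
                return VM_FAULT_SIGBUS;         /* errors are VM_FAULT_* bits */

        vmf->page = example_pages[vmf->pgoff];  /* page handed back via vmf */
        get_page(vmf->page);                    /* core expects a reference */
        return 0;                               /* 0 == handled, minor fault */
}

static struct vm_operations_struct example_vm_ops = {
        .fault = example_fault,
};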
@@ -470,7 +473,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
-               return VM_FAULT_MINOR;
+               return 0;
        }
 
        page_cache_get(old_page);
@@ -495,7 +498,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
-       return VM_FAULT_MINOR;
+       return 0;
 }
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -552,7 +555,7 @@ retry:
        if (idx >= size)
                goto backout;
 
-       ret = VM_FAULT_MINOR;
+       ret = 0;
        if (!pte_none(*ptep))
                goto backout;
 
@@ -603,7 +606,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return ret;
        }
 
-       ret = VM_FAULT_MINOR;
+       ret = 0;
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
@@ -642,7 +645,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
-                       if (ret == VM_FAULT_MINOR)
+                       if (!(ret & VM_FAULT_ERROR))
                                continue;
 
                        remainder = 0;
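With success now encoded as 0 rather than the old VM_FAULT_MINOR, follow_hugetlb_page() can no longer compare against a single success value; it retries the lookup unless one of the error bits is set, and otherwise zeroes remainder to abort the walk. Roughly, the relevant flags at this point in the tree are (from include/linux/mm.h):

#define VM_FAULT_OOM    0x0001
#define VM_FAULT_SIGBUS 0x0002
#define VM_FAULT_MAJOR  0x0004
#define VM_FAULT_WRITE  0x0008  /* special case for get_user_pages */

#define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS)

so !(ret & VM_FAULT_ERROR) accepts both the 0 (minor) and VM_FAULT_MAJOR outcomes.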