Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a088f59..d7ca59d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,9 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
+unsigned long hugepages_treat_as_movable;
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
@@ -44,14 +47,14 @@ static void clear_huge_page(struct page *page, unsigned long addr)
 }
 
 static void copy_huge_page(struct page *dst, struct page *src,
-                          unsigned long addr)
+                          unsigned long addr, struct vm_area_struct *vma)
 {
        int i;
 
        might_sleep();
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
                cond_resched();
-               copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+               copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
 }
 
@@ -66,24 +69,23 @@ static void enqueue_huge_page(struct page *page)
 static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
 {
-       int nid = numa_node_id();
+       int nid;
        struct page *page = NULL;
-       struct zonelist *zonelist = huge_zonelist(vma, address);
+       struct zonelist *zonelist = huge_zonelist(vma, address,
+                                               htlb_alloc_mask);
        struct zone **z;
 
        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
-               if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
-                   !list_empty(&hugepage_freelists[nid]))
+               if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+                   !list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
                        break;
-       }
-
-       if (*z) {
-               page = list_entry(hugepage_freelists[nid].next,
-                                 struct page, lru);
-               list_del(&page->lru);
-               free_huge_pages--;
-               free_huge_pages_node[nid]--;
+               }
        }
        return page;
 }
@@ -101,15 +103,26 @@ static void free_huge_page(struct page *page)
 
 static int alloc_fresh_huge_page(void)
 {
-       static int nid = 0;
+       static int prev_nid;
        struct page *page;
-       page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
-                                       HUGETLB_PAGE_ORDER);
-       nid = next_node(nid, node_online_map);
+       int nid;
+
+       /*
+        * Copy static prev_nid to local nid, work on that, then copy it
+        * back to prev_nid afterwards: otherwise there's a window in which
+        * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
+        * But we don't need to use a spin_lock here: it really doesn't
+        * matter if occasionally a racer chooses the same nid as we do.
+        */
+       nid = next_node(prev_nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
+       prev_nid = nid;
+
+       page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
+                                       HUGETLB_PAGE_ORDER);
        if (page) {
-               page[1].lru.next = (void *)free_huge_page;      /* dtor */
+               set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
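
The comment in the hunk above spells out a lockless round-robin pattern: copy the static cursor into a local, work on the local, then publish only an in-range value, so a racing caller can never hand the invalid nid MAX_NUMNODES to alloc_pages_node(), while an occasional duplicate pick is harmless. A minimal userspace sketch of the same idea (pick_next_node, prev_node and NR_NODES are illustrative names, not taken from this file):

#include <stdio.h>

#define NR_NODES 4			/* stand-in for MAX_NUMNODES */

/* Round-robin cursor; races are benign, so no lock is taken. */
static int prev_node;

static int pick_next_node(void)
{
	int node = prev_node + 1;	/* work on a local copy */

	if (node >= NR_NODES)		/* wrap before publishing */
		node = 0;
	prev_node = node;		/* only in-range values ever become visible */
	return node;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("allocating on node %d\n", pick_next_node());
	return 0;
}
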
@@ -140,6 +153,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        return page;
 
 fail:
+       if (vma->vm_flags & VM_MAYSHARE)
+               resv_huge_pages++;
        spin_unlock(&hugetlb_lock);
        return NULL;
 }
@@ -172,6 +187,17 @@ static int __init hugetlb_setup(char *s)
 }
 __setup("hugepages=", hugetlb_setup);
 
+static unsigned int cpuset_mems_nr(unsigned int *array)
+{
+       int node;
+       unsigned int nr = 0;
+
+       for_each_node_mask(node, cpuset_current_mems_allowed)
+               nr += array[node];
+
+       return nr;
+}
+
 #ifdef CONFIG_SYSCTL
 static void update_and_free_page(struct page *page)
 {
@@ -183,7 +209,7 @@ static void update_and_free_page(struct page *page)
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1<< PG_writeback);
        }
-       page[1].lru.next = NULL;
+       set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
 }
@@ -243,6 +269,19 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
 }
+
+int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
+                       struct file *file, void __user *buffer,
+                       size_t *length, loff_t *ppos)
+{
+       proc_dointvec(table, write, file, buffer, length, ppos);
+       if (hugepages_treat_as_movable)
+               htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
+       else
+               htlb_alloc_mask = GFP_HIGHUSER;
+       return 0;
+}
+
 #endif /* CONFIG_SYSCTL */
 
 int hugetlb_report_meminfo(char *buf)
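
For context on the handler above: flipping hugepages_treat_as_movable switches htlb_alloc_mask between GFP_HIGHUSER and GFP_HIGHUSER_MOVABLE, so that huge pages allocated afterwards may be taken from ZONE_MOVABLE. Assuming the knob is exposed as /proc/sys/vm/hugepages_treat_as_movable (the sysctl table entry is not part of this hunk), a userspace caller could toggle it like this:

/* Hypothetical usage sketch; path assumed, error handling minimal. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/vm/hugepages_treat_as_movable", O_WRONLY);

	if (fd < 0)
		return 1;
	/* hugetlb_treat_movable_handler() then selects GFP_HIGHUSER_MOVABLE */
	if (write(fd, "1\n", 2) != 2) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
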
@@ -279,15 +318,14 @@ unsigned long hugetlb_total_pages(void)
  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
  * this far.
  */
-static struct page *hugetlb_nopage(struct vm_area_struct *vma,
-                               unsigned long address, int *unused)
+static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        BUG();
-       return NULL;
+       return 0;
 }
 
 struct vm_operations_struct hugetlb_vm_ops = {
-       .nopage = hugetlb_nopage,
+       .fault = hugetlb_vm_op_fault,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -313,9 +351,10 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(*ptep));
-       ptep_set_access_flags(vma, address, ptep, entry, 1);
-       update_mmu_cache(vma, address, entry);
-       lazy_mmu_prot_update(entry);
+       if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+               update_mmu_cache(vma, address, entry);
+               lazy_mmu_prot_update(entry);
+       }
 }
 
 
@@ -344,7 +383,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
-                       add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
@@ -365,6 +403,11 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        pte_t pte;
        struct page *page;
        struct page *tmp;
+       /*
+        * A page gathering list, protected by the per-file i_mmap_lock. The
+        * lock is used to avoid list corruption from concurrent unmappings
+        * of the same page, since we are using page->lru.
+        */
        LIST_HEAD(page_list);
 
        WARN_ON(!is_vm_hugetlb_page(vma));
@@ -372,24 +415,23 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(end & ~HPAGE_MASK);
 
        spin_lock(&mm->page_table_lock);
-
-       /* Update high watermark before we lower rss */
-       update_hiwater_rss(mm);
-
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
 
+               if (huge_pmd_unshare(mm, &address, ptep))
+                       continue;
+
                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;
 
                page = pte_page(pte);
+               if (pte_dirty(pte))
+                       set_page_dirty(page);
                list_add(&page->lru, &page_list);
-               add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
        }
-
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
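
The new comment notes that the page gathering list is protected by the per-file i_mmap_lock, which __unmap_hugepage_range() itself does not take. A sketch of how a caller might provide that serialization (the shape is assumed from the comment, not quoted from this patch):

/*
 * Sketch only: hold the mapping's i_mmap_lock so that two tasks
 * unmapping the same huge pages cannot both link a page into their
 * local page_list through page->lru.
 */
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}
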
@@ -429,7 +471,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
-               return VM_FAULT_MINOR;
+               return 0;
        }
 
        page_cache_get(old_page);
@@ -441,7 +483,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        spin_unlock(&mm->page_table_lock);
-       copy_huge_page(new_page, old_page, address);
+       copy_huge_page(new_page, old_page, address, vma);
        spin_lock(&mm->page_table_lock);
 
        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -454,10 +496,10 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
-       return VM_FAULT_MINOR;
+       return 0;
 }
 
-int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
 {
        int ret = VM_FAULT_SIGBUS;
@@ -511,11 +553,10 @@ retry:
        if (idx >= size)
                goto backout;
 
-       ret = VM_FAULT_MINOR;
+       ret = 0;
        if (!pte_none(*ptep))
                goto backout;
 
-       add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
@@ -563,7 +604,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return ret;
        }
 
-       ret = VM_FAULT_MINOR;
+       ret = 0;
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
@@ -602,7 +643,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
-                       if (ret == VM_FAULT_MINOR)
+                       if (!(ret & VM_FAULT_MAJOR))
                                continue;
 
                        remainder = 0;
@@ -653,11 +694,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
+       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
+               if (huge_pmd_unshare(mm, &address, ptep))
+                       continue;
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
@@ -666,6 +710,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
+       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
        flush_tlb_range(vma, start, end);
 }
@@ -811,6 +856,26 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
        chg = region_chg(&inode->i_mapping->private_list, from, to);
        if (chg < 0)
                return chg;
+       /*
+        * When cpuset is configured, it breaks the strict hugetlb page
+        * reservation as the accounting is done on a global variable. Such
+        * reservation is completely rubbish in the presence of cpuset because
+        * the reservation is not checked against page availability for the
+        * current cpuset. The application can still potentially be OOM-killed
+        * by the kernel for lack of free huge pages in the cpuset the task
+        * is in. Attempting to enforce strict accounting with cpuset is almost
+        * impossible (or too ugly) because cpusets are too fluid: tasks and
+        * memory nodes can be dynamically moved between cpusets.
+        *
+        * The change of semantics for shared hugetlb mapping with cpuset is
+        * undesirable. However, in order to preserve some of the semantics,
+        * we fall back to checking against the current free page availability
+        * as a best attempt, hoping to minimize the impact of the semantic
+        * change that cpuset introduces.
+        */
+       if (chg > cpuset_mems_nr(free_huge_pages_node))
+               return -ENOMEM;
+
        ret = hugetlb_acct_memory(chg);
        if (ret < 0)
                return ret;
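
To illustrate the fallback check above with made-up numbers: if the task's cpuset allows only nodes 0 and 1, holding 10 and 5 free huge pages respectively, a region charge of 20 pages is refused with -ENOMEM even though nodes outside the cpuset may still have free huge pages. A standalone sketch of that arithmetic (all values hypothetical):

#include <stdio.h>

int main(void)
{
	/* free huge pages per node; the cpuset allows nodes 0 and 1 only */
	unsigned int free_huge_pages_node[4] = { 10, 5, 64, 64 };
	int allowed[] = { 0, 1 };
	long chg = 20;			/* pages the mapping needs to reserve */
	unsigned int nr = 0;
	unsigned int i;

	for (i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++)
		nr += free_huge_pages_node[allowed[i]];

	/* 20 > 15: fail the reservation, mirroring the cpuset_mems_nr() check */
	printf("%s\n", chg > nr ? "reservation denied (-ENOMEM)"
				: "reservation ok");
	return 0;
}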