Merge branch 'unicore32' of git://github.com/gxt/linux
[pandora-kernel.git] / mm/hugetlb.c
index 6402458..bb28a5f 100644
@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- *     down_write(&mm->mmap_sem);
+ *     down_write(&mm->mmap_sem);
  * or
- *     down_read(&mm->mmap_sem);
- *     mutex_lock(&hugetlb_instantiation_mutex);
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
        struct list_head link;
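
For reference, the two lock sequences permitted by the comment above look roughly like this in a caller (a sketch built only from the names in the comment, not code from this file):

	/* Exclusive path: enough on its own to touch the region list. */
	down_write(&mm->mmap_sem);
	/* ... modify reservations ... */
	up_write(&mm->mmap_sem);

	/* Shared path: read lock plus the instantiation mutex. */
	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);
	/* ... read or instantiate huge pages ... */
	mutex_unlock(&hugetlb_instantiation_mutex);
	up_read(&mm->mmap_sem);
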
@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-                               1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-                               1 << PG_private | 1<< PG_writeback);
+               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+                               1 << PG_referenced | 1 << PG_dirty |
+                               1 << PG_active | 1 << PG_reserved |
+                               1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
@@ -591,7 +592,6 @@ int PageHuge(struct page *page)
 
        return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
@@ -1105,12 +1105,28 @@ static void __init gather_bootmem_prealloc(void)
        struct huge_bootmem_page *m;
 
        list_for_each_entry(m, &huge_boot_pages, list) {
-               struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
+               struct page *page;
+
+#ifdef CONFIG_HIGHMEM
+               page = pfn_to_page(m->phys >> PAGE_SHIFT);
+               free_bootmem_late((unsigned long)m,
+                                 sizeof(struct huge_bootmem_page));
+#else
+               page = virt_to_page(m);
+#endif
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
+               /*
+                * If we had gigantic hugepages allocated at boot time, we need
+                * to restore the 'stolen' pages to totalram_pages in order to
+                * fix confusing memory reports from free(1) and other
+                * side-effects, like CommitLimit going negative.
+                */
+               if (h->order > (MAX_ORDER - 1))
+                       totalram_pages += 1 << h->order;
        }
 }
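
A worked instance of the accounting added above (a sketch with assumed x86_64 constants, not taken from the patch):

	/*
	 * Assume PAGE_SHIFT = 12 and MAX_ORDER = 11.  A 1 GiB gigantic page
	 * then has h->order = 30 - 12 = 18, which exceeds MAX_ORDER - 1 = 10,
	 * so the branch above fires and totalram_pages grows by
	 * 1 << 18 = 262144 base pages -- pages that were carved out of
	 * bootmem at boot and so were never counted when the remaining
	 * memory was released to the page allocator.
	 */
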
 
@@ -2116,9 +2132,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
-       }
 }
 
 
@@ -2173,9 +2188,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp)) {
+       if (non_swap_entry(swp) && is_migration_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2186,9 +2201,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2407,6 +2422,8 @@ retry_avoidcopy:
         * anon_vma prepared.
         */
        if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
                /* Caller expects lock to be held */
                spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
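
A note on the two releases added above: earlier in this copy-on-write path (outside the hunk) a reference is taken on old_page and alloc_huge_page() returns new_page with a reference held, so a failing anon_vma_prepare() has to drop both before bailing out with VM_FAULT_OOM, and page_table_lock is re-taken on the way out because the caller expects it to be held.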
@@ -2551,7 +2568,7 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON | 
+                       ret = VM_FAULT_HWPOISON |
                              VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
@@ -2619,7 +2636,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        migration_entry_wait(mm, (pmd_t *)ptep, address);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-                       return VM_FAULT_HWPOISON_LARGE | 
+                       return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
        }
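
One more note, on the lines whose trailing whitespace is trimmed in these last two hunks: VM_FAULT_SET_HINDEX(h - hstates) packs the index of the faulting hstate into the fault code next to VM_FAULT_HWPOISON / VM_FAULT_HWPOISON_LARGE, so the architecture fault handler can report the poisoned mapping at huge-page granularity. A sketch of the consuming side (illustrative, not verbatim from any one arch):

	/* In the arch fault handler, recover the huge page shift for the
	 * SIGBUS si_addr_lsb field: */
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	else if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;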