mm: fix kernel BUG at huge_memory.c:1474!
author	Hugh Dickins <hughd@google.com>
Mon, 17 Dec 2012 02:56:58 +0000 (18:56 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 17 Dec 2012 03:02:38 +0000 (19:02 -0800)
Andrea's autonuma-benchmark numa01 hits kernel BUG at huge_memory.c:1474!
in change_huge_pmd(), called from change_protection(), from change_prot_numa(),
from task_numa_work().

That BUG, introduced in the huge zero page commit cad7f613c4d0 ("thp:
change_huge_pmd(): make sure we don't try to make a page writable"),
was trying to verify that newprot never adds write permission to an
anonymous huge page; but Automatic NUMA Balancing's 4b10e7d562c9 ("mm:
mempolicy: Implement change_prot_numa() in terms of change_protection()")
adds a new prot_numa path into change_huge_pmd(), which makes no use of
the newprot provided, and may retain the write bit in the pmd.
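
For context, here is a condensed sketch of the pre-fix flow in
change_huge_pmd(), reconstructed from the hunks below; the
page_mapcount()/pmd_numa() test inside the prot_numa branch is not visible
in the diff context and is reproduced here only as an assumption about the
mm-tree code at the time:

        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
                if (!prot_numa)
                        entry = pmd_modify(entry, newprot);     /* never adds write */
                else {
                        struct page *page = pmd_page(*pmd);

                        /* only check non-shared pages */
                        if (page_mapcount(page) == 1 && !pmd_numa(*pmd)) {
                                /* newprot is never applied on this path */
                                entry = pmd_mknuma(entry);
                        }
                }
                /* fires here: the prot_numa path can keep the old pmd's write bit */
                BUG_ON(pmd_write(entry));
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
        }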

Just move the BUG_ON(pmd_write(entry)) up into the !prot_numa block.

Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d7ee169..32754ee 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1460,9 +1460,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
-               if (!prot_numa)
+               if (!prot_numa) {
                        entry = pmd_modify(entry, newprot);
-               else {
+                       BUG_ON(pmd_write(entry));
+               } else {
                        struct page *page = pmd_page(*pmd);
 
                        /* only check non-shared pages */
@@ -1471,7 +1472,6 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                                entry = pmd_mknuma(entry);
                        }
                }
-               BUG_ON(pmd_write(entry));
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;