mm/hwpoison: don't need to hold compound lock for hugetlbfs page
author Wanpeng Li <liwanp@linux.vnet.ibm.com>
Wed, 11 Sep 2013 21:22:52 +0000 (14:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Sep 2013 22:58:08 +0000 (15:58 -0700)
The compound lock was introduced by commit e9da73d67 ("thp: compound_lock");
it is used to serialize put_page() against __split_huge_page_refcount().  In
addition, transparent hugepages are split in the hwpoison handler, and only
one subpage is poisoned.  There is therefore no need to hold the compound
lock for a hugetlbfs page.  This patch replaces compound_trans_order() with
compound_order() in the places where the page is a hugetlbfs page.
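
For illustration only (not part of the patch; hugetlb_nr_pages() is a
hypothetical helper), a minimal sketch of the pattern the conversion relies
on: a hugetlbfs page is never split, so the order stored in its first tail
page is stable and can be read without taking the compound lock:

	/*
	 * Illustrative sketch, not part of the patch: for a hugetlbfs head
	 * page the compound order cannot change under us, so reading it
	 * locklessly is safe.
	 */
	static inline int hugetlb_nr_pages(struct page *hpage)	/* hypothetical */
	{
		/* same result as 1 << compound_trans_order(hpage), minus the lock */
		return 1 << compound_order(hpage);
	}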

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/memory-failure.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 03f84b8..caf543c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -495,20 +495,6 @@ static inline int compound_order(struct page *page)
        return (unsigned long)page[1].lru.prev;
 }
 
-static inline int compound_trans_order(struct page *page)
-{
-       int order;
-       unsigned long flags;
-
-       if (!PageHead(page))
-               return 0;
-
-       flags = compound_lock_irqsave(page);
-       order = compound_order(page);
-       compound_unlock_irqrestore(page, flags);
-       return order;
-}
-
 static inline void set_compound_order(struct page *page, unsigned long order)
 {
        page[1].lru.prev = (void *)order;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ec9ad52..7b5d325 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -206,7 +206,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
 #ifdef __ARCH_SI_TRAPNO
        si.si_trapno = trapno;
 #endif
-       si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
+       si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
 
        if ((flags & MF_ACTION_REQUIRED) && t == current) {
                si.si_code = BUS_MCEERR_AR;
@@ -983,7 +983,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 static void set_page_hwpoison_huge_page(struct page *hpage)
 {
        int i;
-       int nr_pages = 1 << compound_trans_order(hpage);
+       int nr_pages = 1 << compound_order(hpage);
        for (i = 0; i < nr_pages; i++)
                SetPageHWPoison(hpage + i);
 }
@@ -991,7 +991,7 @@ static void set_page_hwpoison_huge_page(struct page *hpage)
 static void clear_page_hwpoison_huge_page(struct page *hpage)
 {
        int i;
-       int nr_pages = 1 << compound_trans_order(hpage);
+       int nr_pages = 1 << compound_order(hpage);
        for (i = 0; i < nr_pages; i++)
                ClearPageHWPoison(hpage + i);
 }
@@ -1342,7 +1342,7 @@ int unpoison_memory(unsigned long pfn)
                return 0;
        }
 
-       nr_pages = 1 << compound_trans_order(page);
+       nr_pages = 1 << compound_order(page);
 
        if (!get_page_unless_zero(page)) {
                /*
@@ -1506,7 +1506,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
        } else {
                set_page_hwpoison_huge_page(hpage);
                dequeue_hwpoisoned_huge_page(hpage);
-               atomic_long_add(1 << compound_trans_order(hpage),
+               atomic_long_add(1 << compound_order(hpage),
                                &num_poisoned_pages);
        }
        return ret;
@@ -1566,7 +1566,7 @@ int soft_offline_page(struct page *page, int flags)
                if (PageHuge(page)) {
                        set_page_hwpoison_huge_page(hpage);
                        dequeue_hwpoisoned_huge_page(hpage);
-                       atomic_long_add(1 << compound_trans_order(hpage),
+                       atomic_long_add(1 << compound_order(hpage),
                                        &num_poisoned_pages);
                } else {
                        SetPageHWPoison(page);