Merge branch 'hwpoison-hugepages' into hwpoison
author Andi Kleen <ak@linux.intel.com>
Fri, 22 Oct 2010 15:40:48 +0000 (17:40 +0200)
committer Andi Kleen <ak@linux.intel.com>
Fri, 22 Oct 2010 15:40:48 +0000 (17:40 +0200)
Conflicts:
mm/memory-failure.c

1  2 
arch/x86/mm/fault.c
include/linux/mm.h
mm/memory-failure.c
mm/memory.c

diff --combined arch/x86/mm/fault.c
@@@ -11,6 -11,7 +11,7 @@@
  #include <linux/kprobes.h>            /* __kprobes, ...               */
  #include <linux/mmiotrace.h>          /* kmmio_handler, ...           */
  #include <linux/perf_event.h>         /* perf_sw_event                */
+ #include <linux/hugetlb.h>            /* hstate_index_to_shift        */
  
  #include <asm/traps.h>                        /* dotraplinkage, ...           */
  #include <asm/pgalloc.h>              /* pgd_*(), ...                 */
@@@ -160,15 -161,20 +161,20 @@@ is_prefetch(struct pt_regs *regs, unsig
  
  static void
  force_sig_info_fault(int si_signo, int si_code, unsigned long address,
-                    struct task_struct *tsk)
+                    struct task_struct *tsk, int fault)
  {
+       unsigned lsb = 0;
        siginfo_t info;
  
        info.si_signo   = si_signo;
        info.si_errno   = 0;
        info.si_code    = si_code;
        info.si_addr    = (void __user *)address;
-       info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;
+       if (fault & VM_FAULT_HWPOISON_LARGE)
+               lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 
+       if (fault & VM_FAULT_HWPOISON)
+               lsb = PAGE_SHIFT;
+       info.si_addr_lsb = lsb;
  
        force_sig_info(si_signo, &info, tsk);
  }
@@@ -229,16 -235,7 +235,16 @@@ void vmalloc_sync_all(void
  
                spin_lock_irqsave(&pgd_lock, flags);
                list_for_each_entry(page, &pgd_list, lru) {
 -                      if (!vmalloc_sync_one(page_address(page), address))
 +                      spinlock_t *pgt_lock;
 +                      pmd_t *ret;
 +
 +                      pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 +
 +                      spin_lock(pgt_lock);
 +                      ret = vmalloc_sync_one(page_address(page), address);
 +                      spin_unlock(pgt_lock);
 +
 +                      if (!ret)
                                break;
                }
                spin_unlock_irqrestore(&pgd_lock, flags);
@@@ -260,8 -257,6 +266,8 @@@ static noinline __kprobes int vmalloc_f
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
  
 +      WARN_ON_ONCE(in_nmi());
 +
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
@@@ -337,7 -332,29 +343,7 @@@ out
  
  void vmalloc_sync_all(void)
  {
 -      unsigned long address;
 -
 -      for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
 -           address += PGDIR_SIZE) {
 -
 -              const pgd_t *pgd_ref = pgd_offset_k(address);
 -              unsigned long flags;
 -              struct page *page;
 -
 -              if (pgd_none(*pgd_ref))
 -                      continue;
 -
 -              spin_lock_irqsave(&pgd_lock, flags);
 -              list_for_each_entry(page, &pgd_list, lru) {
 -                      pgd_t *pgd;
 -                      pgd = (pgd_t *)page_address(page) + pgd_index(address);
 -                      if (pgd_none(*pgd))
 -                              set_pgd(pgd, *pgd_ref);
 -                      else
 -                              BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 -              }
 -              spin_unlock_irqrestore(&pgd_lock, flags);
 -      }
 +      sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
  }
  
  /*
@@@ -358,8 -375,6 +364,8 @@@ static noinline __kprobes int vmalloc_f
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
  
 +      WARN_ON_ONCE(in_nmi());
 +
        /*
         * Copy kernel mappings over when needed. This can also
         * happen within a race in page table update. In the later
@@@ -722,7 -737,7 +728,7 @@@ __bad_area_nosemaphore(struct pt_regs *
                tsk->thread.error_code  = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no     = 14;
  
-               force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+               force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
  
                return;
        }
@@@ -807,14 -822,14 +813,14 @@@ do_sigbus(struct pt_regs *regs, unsigne
        tsk->thread.trap_no     = 14;
  
  #ifdef CONFIG_MEMORY_FAILURE
-       if (fault & VM_FAULT_HWPOISON) {
+       if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                printk(KERN_ERR
        "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                        tsk->comm, tsk->pid, address);
                code = BUS_MCEERR_AR;
        }
  #endif
-       force_sig_info_fault(SIGBUS, code, address, tsk);
+       force_sig_info_fault(SIGBUS, code, address, tsk, fault);
  }
  
  static noinline void
@@@ -824,7 -839,8 +830,8 @@@ mm_fault_error(struct pt_regs *regs, un
        if (fault & VM_FAULT_OOM) {
                out_of_memory(regs, error_code, address);
        } else {
-               if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
+               if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+                            VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, fault);
                else
                        BUG();
@@@ -885,14 -901,8 +892,14 @@@ spurious_fault(unsigned long error_code
        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);
  
 +      /*
 +       * Note: don't use pte_present() here, since it returns true
 +       * if the _PAGE_PROTNONE bit is set.  However, this aliases the
 +       * _PAGE_GLOBAL bit, which for kernel pages give false positives
 +       * when CONFIG_DEBUG_PAGEALLOC is used.
 +       */
        pte = pte_offset_kernel(pmd, address);
 -      if (!pte_present(*pte))
 +      if (!(pte_flags(*pte) & _PAGE_PRESENT))
                return 0;
  
        ret = spurious_fault_check(error_code, pte);
diff --combined include/linux/mm.h
@@@ -718,12 -718,20 +718,20 @@@ static inline int page_mapped(struct pa
  #define VM_FAULT_SIGBUS       0x0002
  #define VM_FAULT_MAJOR        0x0004
  #define VM_FAULT_WRITE        0x0008  /* Special case for get_user_pages */
- #define VM_FAULT_HWPOISON 0x0010      /* Hit poisoned page */
+ #define VM_FAULT_HWPOISON 0x0010      /* Hit poisoned small page */
+ #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
  
  #define VM_FAULT_NOPAGE       0x0100  /* ->fault installed the pte, not return page */
  #define VM_FAULT_LOCKED       0x0200  /* ->fault locked the returned page */
  
- #define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+ #define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+                        VM_FAULT_HWPOISON_LARGE)
+ /* Encode hstate index for a hwpoisoned large page */
+ #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+ #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
  
  /*
   * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
@@@ -1175,8 -1183,6 +1183,8 @@@ extern void free_bootmem_with_active_re
                                                unsigned long max_low_pfn);
  int add_from_early_node_map(struct range *range, int az,
                                   int nr_range, int nid);
 +u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 +                                      u64 goal, u64 limit);
  void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
                                 u64 goal, u64 limit);
  typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
diff --combined mm/memory-failure.c
@@@ -7,26 -7,21 +7,26 @@@
   * Free Software Foundation.
   *
   * High level machine check handler. Handles pages reported by the
 - * hardware as being corrupted usually due to a 2bit ECC memory or cache
 + * hardware as being corrupted usually due to a multi-bit ECC memory or cache
   * failure.
 + * 
 + * In addition there is a "soft offline" entry point that allows stopping
 + * the use of suspicious not-yet-corrupted pages without killing anything.
   *
   * Handles page cache pages in various states.        The tricky part
 - * here is that we can access any page asynchronous to other VM
 - * users, because memory failures could happen anytime and anywhere,
 - * possibly violating some of their assumptions. This is why this code
 - * has to be extremely careful. Generally it tries to use normal locking
 - * rules, as in get the standard locks, even if that means the
 - * error handling takes potentially a long time.
 - *
 - * The operation to map back from RMAP chains to processes has to walk
 - * the complete process list and has non linear complexity with the number
 - * mappings. In short it can be quite slow. But since memory corruptions
 - * are rare we hope to get away with this.
 + * here is that we can access any page asynchronously with respect to
 + * other VM users, because memory failures could happen anytime and
 + * anywhere. This could violate some of their assumptions. This is why
 + * this code has to be extremely careful. Generally it tries to use
 + * normal locking rules, as in get the standard locks, even if that means
 + * the error handling takes potentially a long time.
 + * 
 + * There are several operations here with exponential complexity because
 + * of unsuitable VM data structures. For example the operation to map back
 + * from RMAP chains to processes has to walk the complete process list and
 + * has non linear complexity with the number of mappings. But since memory
 + * corruptions are rare we hope to get away with this. This avoids
 + * impacting the core VM.
   */
  
  /*
@@@ -35,6 -30,7 +35,6 @@@
   * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
   * - pass bad pages to kdump next kernel
   */
 -#define DEBUG 1               /* remove me in 2.6.34 */
  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/page-flags.h>
@@@ -82,7 -78,7 +82,7 @@@ static int hwpoison_filter_dev(struct p
                return 0;
  
        /*
 -       * page_mapping() does not accept slab page
 +       * page_mapping() does not accept slab pages.
         */
        if (PageSlab(p))
                return -EINVAL;
@@@ -272,7 -268,7 +272,7 @@@ struct to_kill 
        struct list_head nd;
        struct task_struct *tsk;
        unsigned long addr;
 -      unsigned addr_valid:1;
 +      char addr_valid;
  };
  
  /*
@@@ -313,7 -309,7 +313,7 @@@ static void add_to_kill(struct task_str
         * a SIGKILL because the error is not contained anymore.
         */
        if (tk->addr == -EFAULT) {
 -              pr_debug("MCE: Unable to find user space address %lx in %s\n",
 +              pr_info("MCE: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
                tk->addr_valid = 0;
        }
@@@ -581,7 -577,7 +581,7 @@@ static int me_pagecache_clean(struct pa
                                        pfn, err);
                } else if (page_has_private(p) &&
                                !try_to_release_page(p, GFP_NOIO)) {
 -                      pr_debug("MCE %#lx: failed to release buffers\n", pfn);
 +                      pr_info("MCE %#lx: failed to release buffers\n", pfn);
                } else {
                        ret = RECOVERED;
                }
@@@ -697,11 -693,10 +697,10 @@@ static int me_swapcache_clean(struct pa
   * Issues:
   * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
   *   To narrow down kill region to one page, we need to break up pmd.
-  * - To support soft-offlining for hugepage, we need to support hugepage
-  *   migration.
   */
  static int me_huge_page(struct page *p, unsigned long pfn)
  {
+       int res = 0;
        struct page *hpage = compound_head(p);
        /*
         * We can safely recover from error on free or reserved (i.e.
         * so there is no race between isolation and mapping/unmapping.
         */
        if (!(page_mapping(hpage) || PageAnon(hpage))) {
-               __isolate_hwpoisoned_huge_page(hpage);
-               return RECOVERED;
+               res = dequeue_hwpoisoned_huge_page(hpage);
+               if (!res)
+                       return RECOVERED;
        }
        return DELAYED;
  }
@@@ -840,6 -836,8 +840,6 @@@ static int page_action(struct page_stat
        return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
  }
  
 -#define N_UNMAP_TRIES 5
 -
  /*
   * Do all that is necessary to remove user space mappings. Unmap
   * the pages and send SIGBUS to the processes if the data was dirty.
@@@ -851,6 -849,7 +851,6 @@@ static int hwpoison_user_mappings(struc
        struct address_space *mapping;
        LIST_HEAD(tokill);
        int ret;
 -      int i;
        int kill = 1;
        struct page *hpage = compound_head(p);
  
        if (kill)
                collect_procs(hpage, &tokill);
  
 -      /*
 -       * try_to_unmap can fail temporarily due to races.
 -       * Try a few times (RED-PEN better strategy?)
 -       */
 -      for (i = 0; i < N_UNMAP_TRIES; i++) {
 -              ret = try_to_unmap(hpage, ttu);
 -              if (ret == SWAP_SUCCESS)
 -                      break;
 -              pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn,  ret);
 -      }
 -
 +      ret = try_to_unmap(hpage, ttu);
        if (ret != SWAP_SUCCESS)
                printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
                                pfn, page_mapcount(hpage));
@@@ -972,7 -981,10 +972,10 @@@ int __memory_failure(unsigned long pfn
         * We need/can do nothing about count=0 pages.
         * 1) it's a free page, and therefore in safe hand:
         *    prep_new_page() will be the gate keeper.
-        * 2) it's part of a non-compound high order page.
+        * 2) it's a free hugepage, which is also safe:
+        *    an affected hugepage will be dequeued from hugepage freelist,
+        *    so there's no concern about reusing it ever after.
+        * 3) it's part of a non-compound high order page.
         *    Implies some kernel user: cannot stop them from
         *    R/W the page; let's pray that the page has been
         *    used and will be freed some time later.
                if (is_free_buddy_page(p)) {
                        action_result(pfn, "free buddy", DELAYED);
                        return 0;
+               } else if (PageHuge(hpage)) {
+                       /*
+                        * Check "just unpoisoned", "filter hit", and
+                        * "race with other subpage."
+                        */
+                       lock_page_nosync(hpage);
+                       if (!PageHWPoison(hpage)
+                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+                           || (p != hpage && TestSetPageHWPoison(hpage))) {
+                               atomic_long_sub(nr_pages, &mce_bad_pages);
+                               return 0;
+                       }
+                       set_page_hwpoison_huge_page(hpage);
+                       res = dequeue_hwpoisoned_huge_page(hpage);
+                       action_result(pfn, "free huge",
+                                     res ? IGNORED : DELAYED);
+                       unlock_page(hpage);
+                       return res;
                } else {
                        action_result(pfn, "high order kernel", IGNORED);
                        return -EBUSY;
@@@ -1138,16 -1168,26 +1159,26 @@@ int unpoison_memory(unsigned long pfn
        page = compound_head(p);
  
        if (!PageHWPoison(p)) {
 -              pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
 +              pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
                return 0;
        }
  
        nr_pages = 1 << compound_order(page);
  
        if (!get_page_unless_zero(page)) {
+               /*
+                * Since HWPoisoned hugepage should have non-zero refcount,
+                * race between memory failure and unpoison seems to happen.
+                * In such case unpoison fails and memory failure runs
+                * to the end.
+                */
+               if (PageHuge(page)) {
+                       pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
+                       return 0;
+               }
                if (TestClearPageHWPoison(p))
                        atomic_long_sub(nr_pages, &mce_bad_pages);
 -              pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
 +              pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
                return 0;
        }
  
         * the free buddy page pool.
         */
        if (TestClearPageHWPoison(page)) {
 -              pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
 +              pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
                atomic_long_sub(nr_pages, &mce_bad_pages);
                freeit = 1;
+               if (PageHuge(page))
+                       clear_page_hwpoison_huge_page(page);
        }
-       if (PageHuge(p))
-               clear_page_hwpoison_huge_page(page);
        unlock_page(page);
  
        put_page(page);
@@@ -1178,7 -1218,11 +1209,11 @@@ EXPORT_SYMBOL(unpoison_memory)
  static struct page *new_page(struct page *p, unsigned long private, int **x)
  {
        int nid = page_to_nid(p);
-       return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+       if (PageHuge(p))
+               return alloc_huge_page_node(page_hstate(compound_head(p)),
+                                                  nid);
+       else
+               return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
  }
  
  /*
@@@ -1206,14 -1250,21 +1241,21 @@@ static int get_any_page(struct page *p
         * was free.
         */
        set_migratetype_isolate(p);
+       /*
+        * When the target page is a free hugepage, just remove it
+        * from free hugepage list.
+        */
        if (!get_page_unless_zero(compound_head(p))) {
-               if (is_free_buddy_page(p)) {
+               if (PageHuge(p)) {
 -                      pr_debug("get_any_page: %#lx free huge page\n", pfn);
++                      pr_info("get_any_page: %#lx free huge page\n", pfn);
+                       ret = dequeue_hwpoisoned_huge_page(compound_head(p));
+               } else if (is_free_buddy_page(p)) {
 -                      pr_debug("get_any_page: %#lx free buddy page\n", pfn);
 +                      pr_info("get_any_page: %#lx free buddy page\n", pfn);
                        /* Set hwpoison bit while page is still isolated */
                        SetPageHWPoison(p);
                        ret = 0;
                } else {
 -                      pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
 +                      pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
                                pfn, p->flags);
                        ret = -EIO;
                }
        return ret;
  }
  
+ static int soft_offline_huge_page(struct page *page, int flags)
+ {
+       int ret;
+       unsigned long pfn = page_to_pfn(page);
+       struct page *hpage = compound_head(page);
+       LIST_HEAD(pagelist);
+       ret = get_any_page(page, pfn, flags);
+       if (ret < 0)
+               return ret;
+       if (ret == 0)
+               goto done;
+       if (PageHWPoison(hpage)) {
+               put_page(hpage);
+               pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
+               return -EBUSY;
+       }
+       /* Keep page count to indicate a given hugepage is isolated. */
+       list_add(&hpage->lru, &pagelist);
+       ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+       if (ret) {
+               pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+                        pfn, ret, page->flags);
+               if (ret > 0)
+                       ret = -EIO;
+               return ret;
+       }
+ done:
+       if (!PageHWPoison(hpage))
+               atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+       set_page_hwpoison_huge_page(hpage);
+       dequeue_hwpoisoned_huge_page(hpage);
+       /* keep elevated page count for bad page */
+       return ret;
+ }
  /**
   * soft_offline_page - Soft offline a page.
   * @page: page to offline
@@@ -1253,6 -1343,9 +1334,9 @@@ int soft_offline_page(struct page *page
        int ret;
        unsigned long pfn = page_to_pfn(page);
  
+       if (PageHuge(page))
+               return soft_offline_huge_page(page, flags);
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
                        goto done;
        }
        if (!PageLRU(page)) {
 -              pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
 +              pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                return -EIO;
        }
        if (PageHWPoison(page)) {
                unlock_page(page);
                put_page(page);
 -              pr_debug("soft offline: %#lx page already poisoned\n", pfn);
 +              pr_info("soft offline: %#lx page already poisoned\n", pfn);
                return -EBUSY;
        }
  
        put_page(page);
        if (ret == 1) {
                ret = 0;
 -              pr_debug("soft_offline: %#lx: invalidated\n", pfn);
 +              pr_info("soft_offline: %#lx: invalidated\n", pfn);
                goto done;
        }
  
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
                if (ret) {
 -                      pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 +                      pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                                pfn, ret, page->flags);
                        if (ret > 0)
                                ret = -EIO;
                }
        } else {
 -              pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
 +              pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
                                pfn, ret, page_count(page), page->flags);
        }
        if (ret)
diff --combined mm/memory.c
@@@ -1450,7 -1450,8 +1450,8 @@@ int __get_user_pages(struct task_struc
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
                                        if (ret &
-                                           (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
+                                           (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
+                                            VM_FAULT_SIGBUS))
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
@@@ -3185,7 -3186,7 +3186,7 @@@ static inline int handle_pte_fault(stru
                 * with threads.
                 */
                if (flags & FAULT_FLAG_WRITE)
 -                      flush_tlb_page(vma, address);
 +                      flush_tlb_fix_spurious_fault(vma, address);
        }
  unlock:
        pte_unmap_unlock(pte, ptl);