futex: Take hugepages into account when generating futex_key
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 73f17c0..ddf2128 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
  */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
+static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
+{
+       bool free = (spool->count == 0) && (spool->used_hpages == 0);
+
+       spin_unlock(&spool->lock);
+
+       /* If no pages are used, and no other handles to the subpool
+        * remain, free the subpool */
+       if (free)
+               kfree(spool);
+}
+
+struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
+{
+       struct hugepage_subpool *spool;
+
+       spool = kmalloc(sizeof(*spool), GFP_KERNEL);
+       if (!spool)
+               return NULL;
+
+       spin_lock_init(&spool->lock);
+       spool->count = 1;
+       spool->max_hpages = nr_blocks;
+       spool->used_hpages = 0;
+
+       return spool;
+}
+
+void hugepage_put_subpool(struct hugepage_subpool *spool)
+{
+       spin_lock(&spool->lock);
+       BUG_ON(!spool->count);
+       spool->count--;
+       unlock_or_release_subpool(spool);
+}
+
+static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
+                                     long delta)
+{
+       int ret = 0;
+
+       if (!spool)
+               return 0;
+
+       spin_lock(&spool->lock);
+       if ((spool->used_hpages + delta) <= spool->max_hpages) {
+               spool->used_hpages += delta;
+       } else {
+               ret = -ENOMEM;
+       }
+       spin_unlock(&spool->lock);
+
+       return ret;
+}
+
+static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
+                                      long delta)
+{
+       if (!spool)
+               return;
+
+       spin_lock(&spool->lock);
+       spool->used_hpages -= delta;
+       /* If hugetlbfs_put_super couldn't free spool due to
+        * an outstanding subpool reference, free it now. */
+       unlock_or_release_subpool(spool);
+}
+
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
+{
+       return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
+static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
+{
+       return subpool_inode(vma->vm_file->f_dentry->d_inode);
+}
+
 /*
  * Region tracking -- allows tracking of reservations and instantiated pages
  *                    across the pages in a mapping.
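
The subpool added above replaces the old per-mapping quota calls: hugepage_new_subpool() is called once when a limited hugetlbfs instance is set up (the pointer lives in the hugetlbfs superblock, as subpool_inode() shows), hugepage_put_subpool() drops that handle at teardown, and the get/put_pages helpers charge and uncharge individual hugepages against max_hpages. A minimal usage sketch, with hypothetical caller names, might look like this:

/* Hypothetical callers; the real ones live in fs/hugetlbfs and further below. */
static int example_charge_one_hugepage(struct hugepage_subpool *spool)
{
	if (hugepage_subpool_get_pages(spool, 1))
		return -ENOMEM;		/* would exceed spool->max_hpages */
	/* ... allocate the hugepage and stash spool in page_private() ... */
	return 0;
}

static void example_uncharge_one_hugepage(struct hugepage_subpool *spool)
{
	/* May also free the subpool if the mount already dropped its handle. */
	hugepage_subpool_put_pages(spool, 1);
}
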
@@ -460,8 +538,10 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
+       unsigned int cpuset_mems_cookie;
 
-       get_mems_allowed();
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol, &nodemask);
        /*
@@ -488,10 +568,15 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
                        }
                }
        }
-err:
+
        mpol_cond_put(mpol);
-       put_mems_allowed();
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
        return page;
+
+err:
+       mpol_cond_put(mpol);
+       return NULL;
 }
 
 static void update_and_free_page(struct hstate *h, struct page *page)
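
The get_mems_allowed()/put_mems_allowed() pair used above behaves like a seqcount read section over the task's cpuset nodemask: the cookie records the generation at the start, and put_mems_allowed() returns false if mems_allowed changed while the allocation ran, in which case a failed allocation may be spurious and is retried. A minimal sketch of the idiom, using a hypothetical allocation helper:

/* Sketch only; example_alloc_from_allowed_nodes() is a hypothetical helper. */
static struct page *alloc_with_cpuset_retry(gfp_t gfp)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();	/* open the read section */
	page = example_alloc_from_allowed_nodes(gfp);
	/* Only a failed allocation needs retrying when the nodemask changed. */
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;
}
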
@@ -533,9 +618,9 @@ static void free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct address_space *mapping;
+       struct hugepage_subpool *spool =
+               (struct hugepage_subpool *)page_private(page);
 
-       mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
@@ -551,8 +636,7 @@ static void free_huge_page(struct page *page)
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
-       if (mapping)
-               hugetlb_put_quota(mapping, 1);
+       hugepage_subpool_put_pages(spool, 1);
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
@@ -595,6 +679,23 @@ int PageHuge(struct page *page)
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+pgoff_t __basepage_index(struct page *page)
+{
+       struct page *page_head = compound_head(page);
+       pgoff_t index = page_index(page_head);
+       unsigned long compound_idx;
+
+       if (!PageHuge(page_head))
+               return page_index(page);
+
+       if (compound_order(page_head) >= MAX_ORDER)
+               compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+       else
+               compound_idx = page - page_head;
+
+       return (index << compound_order(page_head)) + compound_idx;
+}
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
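
__basepage_index() converts the hugepage-sized page-cache index of the compound head into a base-page-sized index for a particular tail page; the pfn-based branch presumably exists because for pages of MAX_ORDER and above the struct pages backing the tails need not be contiguous. The futex change this file accompanies uses it (via basepage_index()) so that two tasks sharing one base page of a hugepage derive the same futex key. A worked example with assumed numbers, for 2 MB hugepages where compound_order() is 9:

/* Illustrative values only, not taken from the patch. */
static pgoff_t basepage_index_example(void)
{
	pgoff_t index = 3;		/* head page at hugepage index 3        */
	unsigned long compound_idx = 5;	/* base page 5 pages into the compound  */

	return (index << 9) + compound_idx;	/* == 1541, in PAGE_SIZE units */
}
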
@@ -901,7 +1002,6 @@ retry:
        h->resv_huge_pages += delta;
        ret = 0;
 
-       spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
@@ -915,6 +1015,7 @@ retry:
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
 free:
@@ -966,11 +1067,12 @@ static void return_unused_surplus_pages(struct hstate *h,
 /*
  * Determine if the huge page at addr within the vma has an associated
  * reservation.  Where it does not we will need to logically increase
- * reservation and actually increase quota before an allocation can occur.
- * Where any new reservation would be required the reservation change is
- * prepared, but not committed.  Once the page has been quota'd allocated
- * an instantiated the change should be committed via vma_commit_reservation.
- * No action is required on failure.
+ * reservation and actually increase subpool usage before an allocation
+ * can occur.  Where any new reservation would be required the
+ * reservation change is prepared, but not committed.  Once the page
+ * has been allocated from the subpool and instantiated, the change should
+ * be committed via vma_commit_reservation.  No action is required on
+ * failure.
  */
 static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
@@ -1019,24 +1121,24 @@ static void vma_commit_reservation(struct hstate *h,
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
 {
+       struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
-       struct address_space *mapping = vma->vm_file->f_mapping;
-       struct inode *inode = mapping->host;
        long chg;
 
        /*
-        * Processes that did not create the mapping will have no reserves and
-        * will not have accounted against quota. Check that the quota can be
-        * made before satisfying the allocation
-        * MAP_NORESERVE mappings may also need pages and quota allocated
-        * if no reserve mapping overlaps.
+        * Processes that did not create the mapping will have no
+        * reserves and will not have accounted against the subpool
+        * limit. Check that the subpool limit can be honoured before
+        * satisfying the allocation. MAP_NORESERVE mappings may also
+        * need pages allocated and charged against the subpool limit
+        * if no reserve mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
-               if (hugetlb_get_quota(inode->i_mapping, chg))
+               if (hugepage_subpool_get_pages(spool, chg))
                        return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
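
Read together with the reworded comment above vma_needs_reservation(), the ordering in alloc_huge_page() is: prepare the reservation, charge the subpool for any part not already reserved, allocate, then commit; on failure the subpool charge is simply handed back and the prepared (uncommitted) reservation needs no undo. A condensed sketch of that flow, with locking and the avoid_reserve argument omitted:

/* Condensed sketch of the ordering above; not a drop-in replacement. */
static struct page *alloc_huge_page_sketch(struct vm_area_struct *vma,
					   unsigned long addr)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg = vma_needs_reservation(h, vma, addr);	/* prepare only */

	if (chg < 0)
		return ERR_PTR(-VM_FAULT_OOM);
	if (chg && hugepage_subpool_get_pages(spool, chg))
		return ERR_PTR(-VM_FAULT_SIGBUS);	/* nothing to undo */

	page = dequeue_huge_page_vma(h, vma, addr, 0);	/* hugetlb_lock omitted */
	if (!page) {
		hugepage_subpool_put_pages(spool, chg);	/* hand the charge back */
		return ERR_PTR(-VM_FAULT_SIGBUS);
	}

	set_page_private(page, (unsigned long)spool);	/* free path uncharges */
	vma_commit_reservation(h, vma, addr);		/* commit, cannot fail */
	return page;
}
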
@@ -1046,12 +1148,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        if (!page) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
-                       hugetlb_put_quota(inode->i_mapping, chg);
+                       hugepage_subpool_put_pages(spool, chg);
                        return ERR_PTR(-VM_FAULT_SIGBUS);
                }
        }
 
-       set_page_private(page, (unsigned long) mapping);
+       set_page_private(page, (unsigned long)spool);
 
        vma_commit_reservation(h, vma, addr);
 
@@ -2007,8 +2109,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-       struct hstate *h = &default_hstate;
-       return h->nr_huge_pages * pages_per_huge_page(h);
+       struct hstate *h;
+       unsigned long nr_total_pages = 0;
+
+       for_each_hstate(h)
+               nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+       return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
@@ -2068,10 +2174,20 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
                kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+       struct resv_map *reservations = vma_resv_map(vma);
+
+       if (!reservations)
+               return;
+       kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
        struct hstate *h = hstate_vma(vma);
        struct resv_map *reservations = vma_resv_map(vma);
+       struct hugepage_subpool *spool = subpool_vma(vma);
        unsigned long reserve;
        unsigned long start;
        unsigned long end;
@@ -2083,11 +2199,11 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
                reserve = (end - start) -
                        region_count(&reservations->regions, start, end);
 
-               kref_put(&reservations->refs, resv_map_release);
+               resv_map_put(vma);
 
                if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
-                       hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
+                       hugepage_subpool_put_pages(spool, reserve);
                }
        }
 }
@@ -2287,6 +2403,25 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
 }
 
+void __unmap_hugepage_range_final(struct vm_area_struct *vma,
+                         unsigned long start, unsigned long end,
+                         struct page *ref_page)
+{
+       __unmap_hugepage_range(vma, start, end, ref_page);
+
+       /*
+        * Clear this flag so that x86's huge_pmd_share page_table_shareable
+        * test will fail on a vma being torn down, and not grab a page table
+        * on its way out.  We're lucky that the flag has such an appropriate
+        * name, and can in fact be safely cleared here. We could clear it
+        * before the __unmap_hugepage_range above, but all that's necessary
+        * is to clear it before releasing the i_mmap_mutex. This works
+        * because in the context this is called, the VMA is about to be
+        * destroyed and the i_mmap_mutex is held.
+        */
+       vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
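
The comment in __unmap_hugepage_range_final() is about the caller's locking: the VMA is on its way out, i_mmap_mutex is held across the unmap, and clearing VM_MAYSHARE before the mutex is dropped is what makes huge_pmd_share()'s page_table_shareable() test reject the dying VMA. A simplified sketch of the intended call site (assumed shape, not the verbatim mm/memory.c code):

/* Assumed teardown-time caller; simplified for illustration. */
static void example_final_teardown(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct address_space *mapping = vma->vm_file->f_mapping;

	mutex_lock(&mapping->i_mmap_mutex);
	/* Clears VM_MAYSHARE before the unlock; see the comment above. */
	__unmap_hugepage_range_final(vma, start, end, NULL);
	mutex_unlock(&mapping->i_mmap_mutex);
}
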
@@ -2315,9 +2450,9 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * from page cache lookup which is in HPAGE_SIZE units.
         */
        address = address & huge_page_mask(h);
-       pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
-               + (vma->vm_pgoff >> PAGE_SHIFT);
-       mapping = (struct address_space *)page_private(page);
+       pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
+                       vma->vm_pgoff;
+       mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
 
        /*
         * Take the mapping lock for the duration of the table walk. As
@@ -2405,7 +2540,6 @@ retry_avoidcopy:
                if (outside_reserve) {
                        BUG_ON(huge_pte_none(pte));
                        if (unmap_ref_private(mm, vma, old_page, address)) {
-                               BUG_ON(page_count(old_page) != 1);
                                BUG_ON(huge_pte_none(pte));
                                spin_lock(&mm->page_table_lock);
                                goto retry_avoidcopy;
@@ -2634,7 +2768,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (ptep) {
                entry = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_migration(entry))) {
-                       migration_entry_wait(mm, (pmd_t *)ptep, address);
+                       migration_entry_wait_huge(mm, ptep);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
@@ -2686,6 +2820,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * so no worry about deadlock.
         */
        page = pte_page(entry);
+       get_page(page);
        if (page != pagecache_page)
                lock_page(page);
 
@@ -2717,6 +2852,7 @@ out_page_table_lock:
        }
        if (page != pagecache_page)
                unlock_page(page);
+       put_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
@@ -2770,7 +2906,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        break;
                }
 
-               if (absent ||
+               /*
+                * We need to call hugetlb_fault for both hugepages under
+                * migration (in which case hugetlb_fault waits for the
+                * migration) and hwpoisoned hugepages (in which case we need
+                * to prevent the caller from accessing them). To do this we
+                * use is_swap_pte here instead of is_hugetlb_entry_migration
+                * and is_hugetlb_entry_hwpoisoned, because it covers both
+                * cases and because we cannot follow correct pages directly
+                * from any kind of swap entry.
+                */
+               if (absent || is_swap_pte(huge_ptep_get(pte)) ||
                    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
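
The reasoning in the comment is that a migration entry and a hwpoison entry are both swap-format PTEs (neither none nor present), so is_swap_pte() is a strict superset of the two dedicated predicates, and hugetlb_fault() can then tell the cases apart. A rough sketch of that relationship:

/* Sketch; the dedicated helpers additionally look at the swap entry type. */
static bool example_needs_hugetlb_fault(pte_t pte)
{
	if (!is_swap_pte(pte))
		return false;	/* present or empty: nothing to wait for */
	/*
	 * Swap-format hugetlb PTE: either a migration entry (hugetlb_fault
	 * waits for the migration) or a hwpoison entry (hugetlb_fault
	 * returns VM_FAULT_HWPOISON_LARGE and stops the caller).
	 */
	return true;
}
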
@@ -2843,9 +2989,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+       /*
+        * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+        * may have cleared our pud entry and done put_page on the page table:
+        * once we release i_mmap_mutex, another task can do the final put_page
+        * and that page table be reused and filled with junk.
+        */
        flush_tlb_range(vma, start, end);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
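
The hazard named in the new comment comes from shared hugetlb page tables: while i_mmap_mutex is held by another task, huge_pmd_unshare() can unhook a shared PMD page table and drop what may be its last reference, so the TLB must be clean before this path lets go of the mutex. A simplified sketch of the x86 unshare step as I understand it (refcount bookkeeping and address rewinding omitted), assuming the usual shared-PMD layout:

/* Simplified sketch of huge_pmd_unshare(); not the verbatim arch code. */
static int example_huge_pmd_unshare(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);

	if (page_count(virt_to_page(ptep)) == 1)
		return 0;			/* PMD table not shared */

	pud_clear(pud);				/* unhook the shared PMD table */
	put_page(virt_to_page(ptep));		/* possibly the final reference */
	return 1;
}
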
 
 int hugetlb_reserve_pages(struct inode *inode,
@@ -2855,11 +3006,12 @@ int hugetlb_reserve_pages(struct inode *inode,
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
+       struct hugepage_subpool *spool = subpool_inode(inode);
 
        /*
         * Only apply hugepage reservation if asked. At fault time, an
         * attempt will be made for VM_NORESERVE to allocate a page
-        * and filesystem quota without using reserves
+        * without using reserves
         */
        if (vm_flags & VM_NORESERVE)
                return 0;
@@ -2883,21 +3035,25 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0)
-               return chg;
+       if (chg < 0) {
+               ret = chg;
+               goto out_err;
+       }
 
-       /* There must be enough filesystem quota for the mapping */
-       if (hugetlb_get_quota(inode->i_mapping, chg))
-               return -ENOSPC;
+       /* There must be enough pages in the subpool for the mapping */
+       if (hugepage_subpool_get_pages(spool, chg)) {
+               ret = -ENOSPC;
+               goto out_err;
+       }
 
        /*
         * Check enough hugepages are available for the reservation.
-        * Hand back the quota if there are not
+        * Hand the pages back to the subpool if there are not
         */
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
-               hugetlb_put_quota(inode->i_mapping, chg);
-               return ret;
+               hugepage_subpool_put_pages(spool, chg);
+               goto out_err;
        }
 
        /*
@@ -2914,18 +3070,23 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (!vma || vma->vm_flags & VM_MAYSHARE)
                region_add(&inode->i_mapping->private_list, from, to);
        return 0;
+out_err:
+       if (vma)
+               resv_map_put(vma);
+       return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
        struct hstate *h = hstate_inode(inode);
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
+       struct hugepage_subpool *spool = subpool_inode(inode);
 
        spin_lock(&inode->i_lock);
        inode->i_blocks -= (blocks_per_huge_page(h) * freed);
        spin_unlock(&inode->i_lock);
 
-       hugetlb_put_quota(inode->i_mapping, (chg - freed));
+       hugepage_subpool_put_pages(spool, (chg - freed));
        hugetlb_acct_memory(h, -(chg - freed));
 }
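
For context, the usual entry points into this pair are mmap() and truncate of a hugetlbfs file: the mmap path reserves (and charges the subpool for) the whole range up front in hugepage units, and truncation ends up in hugetlb_unreserve_pages() above to give back whatever was not consumed. A loose sketch of the mmap-side caller, modelled on hugetlbfs_file_mmap() but simplified and not verbatim:

/* Loose caller sketch; assumes a hugepage-aligned, file-backed mapping. */
static int example_hugetlbfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	pgoff_t from = vma->vm_pgoff >> huge_page_order(h);
	pgoff_t to = from + ((vma->vm_end - vma->vm_start) >> huge_page_shift(h));

	/* Reserves hugepages and charges the inode's subpool for the range. */
	return hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags);
}
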