Bluetooth: sort the list of IDs in the source code

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 36b3d98..79166c2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -642,6 +642,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		set_pmd_at(mm, haddr, pmd, entry);
 		prepare_pmd_huge_pte(pgtable, mm);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
 	}
@@ -681,7 +682,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
 		if (unlikely(anon_vma_prepare(vma)))
 			return VM_FAULT_OOM;
-		if (unlikely(khugepaged_enter(vma)))
+		if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
@@ -760,6 +761,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 	prepare_pmd_huge_pte(pgtable, dst_mm);
+	dst_mm->nr_ptes++;
 	ret = 0;
 out_unlock:
@@ -858,7 +860,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 	kfree(pages);
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
@@ -920,6 +921,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
+		if (ret & VM_FAULT_OOM)
+			split_huge_page(page);
 		put_page(page);
 		goto out;
 	}
@@ -927,6 +930,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
+		split_huge_page(page);
 		put_page(page);
 		ret |= VM_FAULT_OOM;
 		goto out;
@@ -1017,6 +1021,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		VM_BUG_ON(page_mapcount(page) < 0);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		VM_BUG_ON(!PageHead(page));
+		tlb->mm->nr_ptes--;
 		spin_unlock(&tlb->mm->page_table_lock);
 		tlb_remove_page(tlb, page);
 		pte_free(tlb->mm, pgtable);
@@ -1356,7 +1361,6 @@ static int __split_huge_page_map(struct page *page,
 			pte_unmap(pte);
 		}
-		mm->nr_ptes++;
 		smp_wmb(); /* make pte visible before pmd */
 		/*
 		 * Up to this point the pmd is present and huge and
@@ -1489,7 +1493,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * register it here without waiting a page fault that
 		 * may not happen any time soon.
 		 */
-		if (unlikely(khugepaged_enter_vma_merge(vma)))
+		if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
 			return -ENOMEM;
 		break;
 	case MADV_NOHUGEPAGE:
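
Note on the new vm_flags argument in this hunk: hugepage_madvise() updates *vm_flags before its caller commits the new flags to the vma, so khugepaged is told about the flags that are about to take effect rather than reading the possibly stale vma->vm_flags. A minimal sketch of that calling pattern, with a hypothetical caller name (the helper and flag names are the ones used in this tree):

/* Hypothetical caller, sketching why flags are passed explicitly. */
static int madvise_hugepage_sketch(struct vm_area_struct *vma)
{
	unsigned long new_flags = vma->vm_flags | VM_HUGEPAGE;

	/*
	 * vma->vm_flags still holds the old value here; pass the
	 * updated flags so khugepaged_enter_vma_merge() decides
	 * based on what is about to be installed.
	 */
	if (unlikely(khugepaged_enter_vma_merge(vma, new_flags)))
		return -ENOMEM;

	vma->vm_flags = new_flags;	/* committed after registration */
	return 0;
}
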
@@ -1621,7 +1625,8 @@ int __khugepaged_enter(struct mm_struct *mm)
 	return 0;
 }
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+			       unsigned long vm_flags)
 {
 	unsigned long hstart, hend;
 	if (!vma->anon_vma)
@@ -1637,11 +1642,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
 	 * If is_pfn_mapping() is true is_learn_pfn_mapping() must be
 	 * true too, verify it here.
 	 */
-	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || vm_flags & VM_NO_THP);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
-		return khugepaged_enter(vma);
+		return khugepaged_enter(vma, vm_flags);
 	return 0;
 }
@@ -1878,6 +1883,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out;
 	vma = find_vma(mm, address);
+	if (!vma)
+		goto out;
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
@@ -1933,7 +1940,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 		pte_unmap(pte);
 		spin_lock(&mm->page_table_lock);
 		BUG_ON(!pmd_none(*pmd));
-		set_pmd_at(mm, address, pmd, _pmd);
+		/*
+		 * We can only use set_pmd_at when establishing
+		 * hugepmds and never for establishing regular pmds that
+		 * points to regular pagetables. Use pmd_populate for that
+		 */
+		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
 		spin_unlock(&mm->page_table_lock);
 		anon_vma_unlock(vma->anon_vma);
 		goto out;
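
The comment introduced above encodes an invariant worth spelling out: set_pmd_at() is reserved for installing huge pmds, while a pmd that points at a table of regular ptes must be installed with pmd_populate(), which also does the architecture-specific bookkeeping for page-table pages. A minimal sketch of the two cases, assuming the same kernel-internal APIs used in this file (the wrapper function is hypothetical):

/*
 * Sketch of the invariant, not actual tree code: a pmd is either a
 * huge pmd mapping the page directly, or a pointer to a pte table.
 */
static void pmd_install_sketch(struct mm_struct *mm, unsigned long addr,
			       pmd_t *pmd, pmd_t entry, bool huge)
{
	if (huge)
		set_pmd_at(mm, addr, pmd, entry);	   /* huge pmd only */
	else
		pmd_populate(mm, pmd, pmd_pgtable(entry)); /* regular pmd */
}
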
@@ -1969,7 +1981,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
 	prepare_pmd_huge_pte(pgtable, mm);
-	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 #ifndef CONFIG_NUMA
@@ -2064,7 +2075,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
 	struct mm_struct *mm = mm_slot->mm;
-	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
@@ -2094,7 +2105,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	int progress = 0;
 	VM_BUG_ON(!pages);
-	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 	if (khugepaged_scan.mm_slot)
 		mm_slot = khugepaged_scan.mm_slot;
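
The last two hunks relax the lock assertions for uniprocessor builds: with CONFIG_SMP disabled, spinlocks compile down to no-ops and spin_is_locked() can always return 0, so the unconditional VM_BUG_ON(!spin_is_locked(...)) would fire even with the lock correctly held. Guarding with NR_CPUS != 1 keeps the check on SMP kernels and skips it on UP. A minimal sketch of the pattern, with a hypothetical helper name:

/*
 * Hypothetical helper illustrating the guarded assertion: on UP
 * kernels (NR_CPUS == 1) spin_is_locked() is not meaningful, so
 * the short-circuit disables the check there.
 */
static inline void assert_khugepaged_mm_lock_held(void)
{
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
}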