dump_stack();
}
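+/*
+ * A mapping is copy-on-write when it is private but may be written:
+ * VM_MAYWRITE set, VM_SHARED clear.  Shared mappings never COW.
+ */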
+static inline int is_cow_mapping(unsigned int flags)
+{
+ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
/*
* This function gets the "struct page" associated with a pte.
*
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
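+ /*
+ * A pfn that differs from the original layout can only be a COWed
+ * copy, and only a COW mapping can contain those; in a non-COW
+ * pfnmap every pfn is raw, with no struct page behind it.
+ */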
- if (vma->vm_flags & VM_SHARED)
+ if (!is_cow_mapping(vma->vm_flags))
return NULL;
}
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
- if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
+ if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = *src_pte;
}
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
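+ /*
+ * VM_INSERTPAGE vmas are excluded from the skip below: their ptes
+ * were installed with vm_insert_page() and cannot be re-created by
+ * faulting, so fork must copy them.
+ */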
- if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
if (!vma->anon_vma)
return 0;
}
continue;
}
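+ /*
+ * VM_IO and VM_PFNMAP mappings have no struct page behind their
+ * ptes, so get_user_pages() must not try to hand pages out here.
+ */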
- if (!vma || (vma->vm_flags & VM_IO)
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
return -EFAULT;
if (!page_count(page))
return -EINVAL;
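+ /*
+ * Remember that this vma has pages inserted directly, so that
+ * fork's copy_page_range() copies the ptes instead of skipping them.
+ */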
+ vma->vm_flags |= VM_INSERTPAGE;
return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
*/
- if (!(vma->vm_flags & VM_SHARED)) {
+ if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
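+ /*
+ * vm_pgoff now records the first remapped pfn, which is how the
+ * pte-to-struct-page lookup recognizes the original, un-COW'ed
+ * pfns (pfn == vm_pgoff + offset).
+ */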
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
lru_cache_add_active(new_page);
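+ /* new_page is not yet mapped anywhere else, so take the new-anon rmap path */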
- page_add_anon_rmap(new_page, vma, address);
+ page_add_new_anon_rmap(new_page, vma, address);
/* Free the old page.. */
new_page = old_page;
out_busy:
return -ETXTBSY;
}
-
EXPORT_SYMBOL(vmtruncate);
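+/*
+ * vmtruncate_range - punch a hole in a file's pagecache and backing
+ * blocks without changing the file's size.  Expected to be reached
+ * from madvise(MADV_REMOVE) (hooked up elsewhere in this series) on
+ * filesystems that implement ->truncate_range.
+ */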
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * If the underlying filesystem is not going to provide
+ * a way to truncate a range of blocks (punch a hole),
+ * we should return failure right now.
+ */
+ if (!inode->i_op || !inode->i_op->truncate_range)
+ return -ENOSYS;
+
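+ /*
+ * Serialize with writers and other truncations via i_mutex and
+ * i_alloc_sem, tear down any user mappings of the range, drop the
+ * pagecache pages, then let the filesystem free the backing blocks.
+ */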
+ mutex_lock(&inode->i_mutex);
+ down_write(&inode->i_alloc_sem);
+ unmap_mapping_range(mapping, offset, (end - offset), 1);
+ truncate_inode_pages_range(mapping, offset, end);
+ inode->i_op->truncate_range(inode, offset, end);
+ up_write(&inode->i_alloc_sem);
+ mutex_unlock(&inode->i_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(vmtruncate_range);
+
/*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
goto out;
entry = pte_to_swp_entry(orig_pte);
+again:
page = lookup_swap_cache(entry);
if (!page) {
swapin_readahead(entry, address, vma);
mark_page_accessed(page);
lock_page(page);
+ if (!PageSwapCache(page)) {
+ /* Page migration has occurred */
+ unlock_page(page);
+ page_cache_release(page);
+ goto again;
+ }
/*
* Back out if somebody else already faulted in this pte.
goto release;
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page);
- SetPageReferenced(page);
- page_add_anon_rmap(page, vma, address);
+ page_add_new_anon_rmap(page, vma, address);
} else {
/* Map the ZERO_PAGE - vm_page_prot is readonly */
page = ZERO_PAGE(address);
if (anon) {
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
+ page_add_new_anon_rmap(new_page, vma, address);
} else {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
}
+EXPORT_SYMBOL_GPL(__handle_mm_fault);
+
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.