tlb->mm = mm;
tlb->fullmm = fullmm;
+ tlb->start = -1UL;
+ tlb->end = 0;
tlb->need_flush = 0;
tlb->fast_mode = (num_possible_cpus() == 1);
tlb->local.next = NULL;
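The inverted initial range (start above end) is what lets later updates use plain min/max. A minimal sketch of such a range tracker, assuming only the start/end fields introduced above (the helper name tlb_track_range is hypothetical, not from the patch):

	static inline void tlb_track_range(struct mmu_gather *tlb,
					   unsigned long addr, unsigned long size)
	{
		/* start = -1UL / end = 0 encodes "empty", so min()/max()
		 * converge on the union of all tracked addresses. */
		tlb->start = min(tlb->start, addr);
		tlb->end = max(tlb->end, addr + size);
	}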
{
struct mmu_gather_batch *batch, *next;
+ tlb->start = start;
+ tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
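With this hunk, tlb_finish_mmu() takes the unmapped range from its caller, so the final flush covers the whole span even when no pages were batched. A sketch of the resulting calling convention, assuming the signatures of this kernel era:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);		/* fullmm == 0: partial unmap */
	unmap_page_range(&tlb, vma, start, end, NULL);
	tlb_finish_mmu(&tlb, start, end);	/* flush exactly [start, end) */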
if (force_flush) {
force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+ tlb->start = addr;
+ tlb->end = end;
+#endif
tlb_flush_mmu(tlb);
if (addr != end)
goto again;
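Bounding the partial flush matters because a range-aware arch hook can then flush only the span unmapped so far instead of the whole mm. A sketch of such a hook, loosely modeled on x86's flush_tlb_mm_range() of this era (treat the exact signature as an assumption):

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		if (tlb->fullmm || tlb->start > tlb->end)
			flush_tlb_mm(tlb->mm);	/* full teardown, or nothing tracked */
		else
			flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);
	}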
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
- if (vma->vm_file)
- unmap_hugepage_range(vma, start, end, NULL);
+ if (vma->vm_file) {
+ mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
+ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ }
} else
unmap_page_range(tlb, vma, start, end, details);
}
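Taking i_mmap_mutex here serializes the final unmap against hugetlbfs truncation. The _final variant is roughly shaped as below (paraphrased from mm/hugetlb.c of this era; a sketch, not the exact source): it tears down the range and, still under the lock, clears VM_MAYSHARE so shared huge page tables cannot be reattached to a dying VMA.

	void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
	{
		__unmap_hugepage_range(tlb, vma, start, end, ref_page);
		vma->vm_flags &= ~VM_MAYSHARE;	/* no more pmd sharing with this vma */
	}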
if (!page_mkwrite) {
wait_on_page_locked(dirty_page);
set_page_dirty_balance(dirty_page, page_mkwrite);
+ /* file_update_time outside page_lock */
+ if (vma->vm_file)
+ file_update_time(vma->vm_file);
}
put_page(dirty_page);
if (page_mkwrite) {
}
}
- /* file_update_time outside page_lock */
- if (vma->vm_file)
- file_update_time(vma->vm_file);
-
return ret;
}
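Moving file_update_time() under !page_mkwrite relies on the filesystem updating timestamps in its own handler. A sketch of a typical ->page_mkwrite implementation, modeled loosely on real filesystems of this era (the function name is illustrative):

	static int example_page_mkwrite(struct vm_area_struct *vma,
					struct vm_fault *vmf)
	{
		/* Filesystems conventionally update mtime/ctime here, which
		 * is why the generic fault path above can skip
		 * file_update_time() when a ->page_mkwrite handler exists. */
		file_update_time(vma->vm_file);

		lock_page(vmf->page);
		/* ... allocate blocks, make the page writable ... */
		return VM_FAULT_LOCKED;
	}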
if (dirty_page) {
struct address_space *mapping = page->mapping;
+ int dirtied = 0;
if (set_page_dirty(dirty_page))
- page_mkwrite = 1;
+ dirtied = 1;
unlock_page(dirty_page);
put_page(dirty_page);
- if (page_mkwrite && mapping) {
+ if ((dirtied || page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
* dirty their pages
}
/* file_update_time outside page_lock */
- if (vma->vm_file)
+ if (vma->vm_file && !page_mkwrite)
file_update_time(vma->vm_file);
} else {
unlock_page(vmf.page);
free_page((unsigned long)buf);
}
}
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
}
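The unlock must use the mm that was actually locked, not current->mm, which can legitimately differ when operating on another task's address space. A minimal sketch of the correct pattern, with get_task_mm() as the illustrative source of the cached pointer (the surrounding function is hypothetical):

	static int walk_other_mm(struct task_struct *task)
	{
		struct mm_struct *mm = get_task_mm(task);	/* may not be current->mm */

		if (!mm)
			return -EINVAL;
		down_read(&mm->mmap_sem);
		/* ... fault in or inspect pages of 'mm' ... */
		up_read(&mm->mmap_sem);		/* unlock the same mm we locked */
		mmput(mm);
		return 0;
	}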
#ifdef CONFIG_PROVE_LOCKING