Update ctime and mtime for memory-mapped files
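
When a process writes to a file through a shared, writable memory
mapping, the file's ctime and mtime should be updated just as they are
for write(2).  Call file_update_time() on the backing file whenever a
write fault leaves a dirty page behind: in do_wp_page() for faults on
present read-only ptes, and in __do_fault() for faults on not-present
pages.

Note that the blob-to-blob range rendered below (f82b359..4b0144b)
spans more than the titled change; it also picks up several unrelated
mm changes from the same range: removal of the zeromap_page_range()
family and of ZERO_PAGE-based read faults for anonymous mappings,
removal of the flush_tlb_pgtables() and lazy_mmu_prot_update() hooks,
a write flag for follow_hugetlb_page(), and assorted cleanups.

As an illustration only (not part of the patch), here is a minimal
userspace sketch of the behaviour being changed; the file name
"testfile" and the 4 KiB size are arbitrary:

/*
 * mmap-mtime.c: sketch only, not part of this patch.  Dirty one page
 * of a file through a MAP_SHARED mapping and report whether st_mtime
 * moved.  With this change applied, the write fault itself calls
 * file_update_time(), so the timestamp advances.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat before, after;
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        fstat(fd, &before);
        sleep(1);                       /* make a timestamp change visible */
        p[0] = 'x';                     /* write fault dirties the page */
        msync(p, 4096, MS_SYNC);        /* force writeback, for good measure */
        fstat(fd, &after);

        printf("mtime %s\n",
               after.st_mtime > before.st_mtime ? "updated" : "unchanged");
        munmap(p, 4096);
        close(fd);
        return 0;
}

Build with "cc -Wall -o mmap-mtime mmap-mtime.c"; without the change,
the program reports "unchanged" even though the file's contents were
modified through the mapping.
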
diff --git a/mm/memory.c b/mm/memory.c
index f82b359..4b0144b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -259,9 +259,6 @@ void free_pgd_range(struct mmu_gather **tlb,
                        continue;
                free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
-
-       if (!(*tlb)->fullmm)
-               flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
@@ -395,6 +392,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
                        return NULL;
        }
 
+#ifdef CONFIG_DEBUG_VM
        /*
         * Add some anal sanity checks for now. Eventually,
         * we should just do "return pfn_to_page(pfn)", but
@@ -405,6 +403,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
                print_bad_pte(vma, pte, addr);
                return NULL;
        }
+#endif
 
        /*
         * NOTE! We still have PageReserved() pages in the page 
@@ -966,7 +965,7 @@ no_page_table:
         * has touched so far, we don't want to allocate page tables.
         */
        if (flags & FOLL_ANON) {
-               page = ZERO_PAGE(address);
+               page = ZERO_PAGE(0);
                if (flags & FOLL_GET)
                        get_page(page);
                BUG_ON(flags & FOLL_WRITE);
@@ -1039,7 +1038,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &len, i);
+                                               &start, &len, i, write);
                        continue;
                }
 
@@ -1111,95 +1110,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
-static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-                       unsigned long addr, unsigned long end, pgprot_t prot)
-{
-       pte_t *pte;
-       spinlock_t *ptl;
-       int err = 0;
-
-       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-       if (!pte)
-               return -EAGAIN;
-       arch_enter_lazy_mmu_mode();
-       do {
-               struct page *page = ZERO_PAGE(addr);
-               pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
-
-               if (unlikely(!pte_none(*pte))) {
-                       err = -EEXIST;
-                       pte++;
-                       break;
-               }
-               page_cache_get(page);
-               page_add_file_rmap(page);
-               inc_mm_counter(mm, file_rss);
-               set_pte_at(mm, addr, pte, zero_pte);
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-       arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(pte - 1, ptl);
-       return err;
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
-                       unsigned long addr, unsigned long end, pgprot_t prot)
-{
-       pmd_t *pmd;
-       unsigned long next;
-       int err;
-
-       pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd)
-               return -EAGAIN;
-       do {
-               next = pmd_addr_end(addr, end);
-               err = zeromap_pte_range(mm, pmd, addr, next, prot);
-               if (err)
-                       break;
-       } while (pmd++, addr = next, addr != end);
-       return err;
-}
-
-static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
-                       unsigned long addr, unsigned long end, pgprot_t prot)
-{
-       pud_t *pud;
-       unsigned long next;
-       int err;
-
-       pud = pud_alloc(mm, pgd, addr);
-       if (!pud)
-               return -EAGAIN;
-       do {
-               next = pud_addr_end(addr, end);
-               err = zeromap_pmd_range(mm, pud, addr, next, prot);
-               if (err)
-                       break;
-       } while (pud++, addr = next, addr != end);
-       return err;
-}
-
-int zeromap_page_range(struct vm_area_struct *vma,
-                       unsigned long addr, unsigned long size, pgprot_t prot)
-{
-       pgd_t *pgd;
-       unsigned long next;
-       unsigned long end = addr + size;
-       struct mm_struct *mm = vma->vm_mm;
-       int err;
-
-       BUG_ON(addr >= end);
-       pgd = pgd_offset(mm, addr);
-       flush_cache_range(vma, addr, end);
-       do {
-               next = pgd_addr_end(addr, end);
-               err = zeromap_pud_range(mm, pgd, addr, next, prot);
-               if (err)
-                       break;
-       } while (pgd++, addr = next, addr != end);
-       return err;
-}
-
 pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
 {
        pgd_t * pgd = pgd_offset(mm, addr);
@@ -1700,10 +1610,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+               if (ptep_set_access_flags(vma, address, page_table, entry,1))
                        update_mmu_cache(vma, address, entry);
-                       lazy_mmu_prot_update(entry);
-               }
                ret |= VM_FAULT_WRITE;
                goto unlock;
        }
@@ -1717,16 +1625,11 @@ gotten:
 
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       if (old_page == ZERO_PAGE(address)) {
-               new_page = alloc_zeroed_user_highpage_movable(vma, address);
-               if (!new_page)
-                       goto oom;
-       } else {
-               new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-               if (!new_page)
-                       goto oom;
-               cow_user_page(new_page, old_page, address, vma);
-       }
+       VM_BUG_ON(old_page == ZERO_PAGE(0));
+       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+       if (!new_page)
+               goto oom;
+       cow_user_page(new_page, old_page, address, vma);
 
        /*
         * Re-check the pte - we dropped the lock
@@ -1744,7 +1647,6 @@ gotten:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               lazy_mmu_prot_update(entry);
                /*
                 * Clear the pte entry and flush it first, before updating the
                 * pte with the new entry. This will avoid a race condition
@@ -1768,6 +1670,9 @@ gotten:
 unlock:
        pte_unmap_unlock(page_table, ptl);
        if (dirty_page) {
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+
                /*
                 * Yes, Virginia, this is actually required to prevent a race
                 * with clear_page_dirty_for_io() from clearing the page dirty
@@ -2184,9 +2089,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
-       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
        mark_page_accessed(page);
        lock_page(page);
+       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
        /*
         * Back out if somebody else already faulted in this pte.
@@ -2252,44 +2157,28 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
-       if (write_access) {
-               /* Allocate our own private page. */
-               pte_unmap(page_table);
-
-               if (unlikely(anon_vma_prepare(vma)))
-                       goto oom;
-               page = alloc_zeroed_user_highpage_movable(vma, address);
-               if (!page)
-                       goto oom;
-
-               entry = mk_pte(page, vma->vm_page_prot);
-               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       /* Allocate our own private page. */
+       pte_unmap(page_table);
 
-               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-               if (!pte_none(*page_table))
-                       goto release;
-               inc_mm_counter(mm, anon_rss);
-               lru_cache_add_active(page);
-               page_add_new_anon_rmap(page, vma, address);
-       } else {
-               /* Map the ZERO_PAGE - vm_page_prot is readonly */
-               page = ZERO_PAGE(address);
-               page_cache_get(page);
-               entry = mk_pte(page, vma->vm_page_prot);
+       if (unlikely(anon_vma_prepare(vma)))
+               goto oom;
+       page = alloc_zeroed_user_highpage_movable(vma, address);
+       if (!page)
+               goto oom;
 
-               ptl = pte_lockptr(mm, pmd);
-               spin_lock(ptl);
-               if (!pte_none(*page_table))
-                       goto release;
-               inc_mm_counter(mm, file_rss);
-               page_add_file_rmap(page);
-       }
+       entry = mk_pte(page, vma->vm_page_prot);
+       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (!pte_none(*page_table))
+               goto release;
+       inc_mm_counter(mm, anon_rss);
+       lru_cache_add_active(page);
+       page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, address, entry);
-       lazy_mmu_prot_update(entry);
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
@@ -2442,7 +2331,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
-               lazy_mmu_prot_update(entry);
        } else {
                if (anon)
                        page_cache_release(page);
@@ -2458,6 +2346,9 @@ out_unlocked:
        if (anon)
                page_cache_release(vmf.page);
        else if (dirty_page) {
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+
                set_page_dirty_balance(dirty_page, page_mkwrite);
                put_page(dirty_page);
        }
@@ -2470,7 +2361,7 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                int write_access, pte_t orig_pte)
 {
        pgoff_t pgoff = (((address & PAGE_MASK)
-                       - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
+                       - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
        pte_unmap(page_table);
@@ -2614,7 +2505,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
                update_mmu_cache(vma, address, entry);
-               lazy_mmu_prot_update(entry);
        } else {
                /*
                 * This is needed only for protection faults but the arch code
@@ -2831,7 +2721,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
                return 0;
 
        down_read(&mm->mmap_sem);
-       /* ignore errors, just check how much was sucessfully transfered */
+       /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;
@@ -2866,4 +2756,3 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 
        return buf - old_buf;
 }
-EXPORT_SYMBOL_GPL(access_process_vm);