#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
+#include <linux/file.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
struct address_space *mapping;
loff_t offset, endoff;
int error;
+ struct file *f;
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
return -EINVAL;
- if (!vma->vm_file || !vma->vm_file->f_mapping
- || !vma->vm_file->f_mapping->host) {
+ f = vma->vm_file;
+
+ if (!f || !f->f_mapping || !f->f_mapping->host) {
return -EINVAL;
}
endoff = (loff_t)(end - vma->vm_start - 1)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- /* vmtruncate_range needs to take i_mutex */
+ /*
+ * vmtruncate_range may need to take i_mutex. We need to
+ * explicitly grab a reference because the vma (and hence the
+ * vma's reference to the file) can go away as soon as we drop
+ * mmap_sem.
+ */
+ get_file(f);
up_read(&current->mm->mmap_sem);
error = vmtruncate_range(mapping->host, offset, endoff);
+ fput(f);
down_read(&current->mm->mmap_sem);
return error;
}
+#ifdef __arm__
+/*
+ * madvise_force_cache - rewrite the memory-type attribute bits of every
+ * present pte in [start, end) so the region uses the caching policy encoded
+ * in the low three bits of @tex_cb, and update vma->vm_page_prot so pages
+ * faulted in later receive the same attributes.
+ *
+ * @vma:    vma covering the range; its vm_page_prot is modified in place
+ * @prev:   set to @vma so the caller knows the vma was not split or merged
+ * @start:  first address of the range (presumably page-aligned by the
+ *          madvise entry path -- not re-checked here)
+ * @end:    one past the last address of the range
+ * @tex_cb: requested policy; only bits 2:0 are used (masked below)
+ *
+ * Returns 0 on success, -EINVAL if the vma has no mm.
+ *
+ * NOTE(review): the walk descends only one pud/pmd per pgd iteration and
+ * then jumps addr straight to next_pgd.  On classic two-level ARM the pud
+ * and pmd are folded into the pgd, so next_pmd == next_pgd and no entries
+ * are skipped; on any other configuration this walk would be incomplete.
+ * Confirm this stays guarded by __arm__ only.
+ */
+static long madvise_force_cache(struct vm_area_struct *vma,
+		       struct vm_area_struct **prev,
+		       unsigned long start, unsigned long end,
+		       int tex_cb)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr, next_pgd, next_pmd;
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	u32 val;
+
+	/* Tell the caller the vma was left intact. */
+	*prev = vma;
+
+	if (mm == NULL)
+		return -EINVAL;
+
+	/* Only the 3-bit TEX/C/B policy index is meaningful. */
+	tex_cb &= 7;
+	/*
+	 * Patch the vma's protection template so future faults install
+	 * ptes with the new memory type (L_PTE_MT field at bits 5:2;
+	 * the << 2 below places tex_cb into that field).
+	 */
+	vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot,
+				L_PTE_MT_MASK, (tex_cb << 2));
+
+	addr = start;
+	pgd = pgd_offset(mm, addr);
+	/* Write back/invalidate cached data before the attributes change. */
+	flush_cache_range(vma, addr, end);
+	do {
+		next_pgd = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		pud = pud_offset(pgd, addr);
+		pmd = pmd_offset(pud, addr);
+		next_pmd = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			/*
+			 * Huge mapping: rewrite the memory-type bits in the
+			 * section-style descriptor directly.  Mask 0x100c
+			 * covers TEX[0] at bit 12 plus C/B at bits 3:2;
+			 * tex_cb bit 2 -> bit 12, bits 1:0 -> bits 3:2.
+			 */
+			val = pmd_val(*pmd);
+			val &= ~0x100c;
+			val |= (tex_cb << 10) & 0x1000;
+			val |= (tex_cb << 2) & 0x000c;
+			set_pmd_at(mm, addr, pmd, __pmd(val));
+		}
+		else if (pmd_none_or_clear_bad(pmd))
+			continue;
+
+		/*
+		 * NOTE(review): in the pmd_trans_huge case above, control
+		 * falls through to this pte walk even though the pmd does
+		 * not point to a pte table -- that looks wrong; a huge pmd
+		 * should presumably skip to the next pmd instead.  TODO:
+		 * confirm against the pte_offset_map_lock contract.
+		 */
+		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+		do {
+			/* Leave non-present (swapped/none) entries alone. */
+			if (!pte_present(*pte))
+				continue;
+			/* Replace the L_PTE_MT field with the new policy. */
+			val = pte_val(*pte);
+			val = (val & ~L_PTE_MT_MASK) | (tex_cb << 2);
+			set_pte_at(mm, addr, pte, __pte(val));
+		} while (pte++, addr += PAGE_SIZE, addr < next_pmd);
+		pte_unmap_unlock(pte - 1, ptl);
+
+	} while (pgd++, addr = next_pgd, addr < end);
+	/* Drop stale TLB entries so the new attributes take effect. */
+	flush_tlb_range(vma, start, end);
+
+	return 0;
+}
+#endif
+
#ifdef CONFIG_MEMORY_FAILURE
/*
* Error injection support for memory error handling.
return madvise_willneed(vma, prev, start, end);
case MADV_DONTNEED:
return madvise_dontneed(vma, prev, start, end);
+#ifdef __arm__
+ case 0x2000 ... 0x2007:
+ return madvise_force_cache(vma, prev, start, end,
+ behavior & 7);
+#endif
default:
return madvise_behavior(vma, prev, start, end, behavior);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
case MADV_HUGEPAGE:
case MADV_NOHUGEPAGE:
+#endif
+#ifdef __arm__
+ case 0x2000 ... 0x2007:
#endif
return 1;