madvise: set *prev before the -EBADF return in madvise_willneed(); hold a file reference across vmtruncate_range() in madvise_remove(); add ARM-only force-cache madvise range (0x2000-0x2007)
[pandora-kernel.git] / mm / madvise.c
index 74bf193..42f4fd5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>
 #include <linux/sched.h>
 #include <linux/ksm.h>
+#include <linux/file.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -127,6 +128,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
 {
        struct file *file = vma->vm_file;
 
+       *prev = vma;
        if (!file)
                return -EBADF;
 
@@ -135,7 +137,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
                return 0;
        }
 
-       *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
@@ -197,14 +198,16 @@ static long madvise_remove(struct vm_area_struct *vma,
        struct address_space *mapping;
        loff_t offset, endoff;
        int error;
+       struct file *f;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
 
-       if (!vma->vm_file || !vma->vm_file->f_mapping
-               || !vma->vm_file->f_mapping->host) {
+       f = vma->vm_file;
+
+       if (!f || !f->f_mapping || !f->f_mapping->host) {
                        return -EINVAL;
        }
 
@@ -218,13 +221,81 @@ static long madvise_remove(struct vm_area_struct *vma,
        endoff = (loff_t)(end - vma->vm_start - 1)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* vmtruncate_range needs to take i_mutex */
+       /*
+        * vmtruncate_range may need to take i_mutex.  We need to
+        * explicitly grab a reference because the vma (and hence the
+        * vma's reference to the file) can go away as soon as we drop
+        * mmap_sem.
+        */
+       get_file(f);
        up_read(&current->mm->mmap_sem);
        error = vmtruncate_range(mapping->host, offset, endoff);
+       fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
 }
 
+#ifdef __arm__
+static long madvise_force_cache(struct vm_area_struct *vma,
+                               struct vm_area_struct **prev,
+                               unsigned long start, unsigned long end,
+                               int tex_cb)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long addr, next_pgd, next_pmd;
+       spinlock_t *ptl;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       u32 val;
+
+       *prev = vma;
+
+       if (mm == NULL)
+               return -EINVAL;
+
+       tex_cb &= 7;
+       vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot,
+               L_PTE_MT_MASK, (tex_cb << 2));
+
+       addr = start;
+       pgd = pgd_offset(mm, addr);
+       flush_cache_range(vma, addr, end);
+       do {
+               next_pgd = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               pud = pud_offset(pgd, addr);
+               pmd = pmd_offset(pud, addr);
+               next_pmd = pmd_addr_end(addr, end);
+               if (pmd_trans_huge(*pmd)) {
+                       val = pmd_val(*pmd);
+                       val &= ~0x100c;
+                       val |= (tex_cb << 10) & 0x1000;
+                       val |= (tex_cb << 2)  & 0x000c;
+                       set_pmd_at(mm, addr, pmd, __pmd(val));
+               }
+               else if (pmd_none_or_clear_bad(pmd))
+                       continue;
+
+               pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+               do {
+                       if (!pte_present(*pte))
+                               continue;
+                       val = pte_val(*pte);
+                       val = (val & ~L_PTE_MT_MASK) | (tex_cb << 2);
+                       set_pte_at(mm, addr, pte, __pte(val));
+               } while (pte++, addr += PAGE_SIZE, addr < next_pmd);
+               pte_unmap_unlock(pte - 1, ptl);
+
+       } while (pgd++, addr = next_pgd, addr < end);
+       flush_tlb_range(vma, start, end);
+
+       return 0;
+}
+#endif
+
 #ifdef CONFIG_MEMORY_FAILURE
 /*
  * Error injection support for memory error handling.
@@ -268,6 +339,11 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                return madvise_willneed(vma, prev, start, end);
        case MADV_DONTNEED:
                return madvise_dontneed(vma, prev, start, end);
+#ifdef __arm__
+       case 0x2000 ... 0x2007:
+               return madvise_force_cache(vma, prev, start, end,
+                       behavior & 7);
+#endif
        default:
                return madvise_behavior(vma, prev, start, end, behavior);
        }
@@ -292,6 +368,9 @@ madvise_behavior_valid(int behavior)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
+#endif
+#ifdef __arm__
+       case 0x2000 ... 0x2007:
 #endif
                return 1;