hugetlbfs: check for pgoff value overflow
[pandora-kernel.git] / fs/hugetlbfs/inode.c
index ebc2f4d..906bcbd 100644
@@ -76,6 +76,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
        pagevec_reinit(pvec);
 }
 
+/*
+ * Mask used when checking the page offset value passed in via system
+ * calls.  This value will be converted to a loff_t which is signed.
+ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
+ * value.  The extra bit (- 1 in the shift value) is to take the sign
+ * bit into account.
+ */
+#define PGOFF_LOFFT_MAX \
+       (((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
+
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct inode *inode = file->f_path.dentry->d_inode;
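For illustration only: a self-contained user-space sketch of the check the new PGOFF_LOFFT_MAX mask performs, assuming 4 KiB pages and a 64-bit long as example values (the EX_* names are hypothetical; only the shape of the macro is taken from the hunk above). With these values the mask covers bits 51..63, so any page offset whose byte offset would not fit in a positive loff_t is rejected up front.

#include <stdio.h>

/* Example values only: 4 KiB pages on a 64-bit kernel. */
#define EX_PAGE_SHIFT     12
#define EX_BITS_PER_LONG  64
#define EX_PGOFF_LOFFT_MAX \
        (((1ULL << (EX_PAGE_SHIFT + 1)) - 1) << (EX_BITS_PER_LONG - (EX_PAGE_SHIFT + 1)))

int main(void)
{
        /* Largest pgoff whose byte offset still fits in a positive loff_t... */
        unsigned long long ok_pgoff  = (1ULL << (EX_BITS_PER_LONG - EX_PAGE_SHIFT - 1)) - 1;
        /* ...and the first pgoff whose byte offset would set the sign bit. */
        unsigned long long bad_pgoff = ok_pgoff + 1;

        printf("mask      = %#llx\n", EX_PGOFF_LOFFT_MAX);
        printf("ok pgoff  = %#llx  rejected=%d\n", ok_pgoff,  !!(ok_pgoff  & EX_PGOFF_LOFFT_MAX));
        printf("bad pgoff = %#llx  rejected=%d\n", bad_pgoff, !!(bad_pgoff & EX_PGOFF_LOFFT_MAX));
        return 0;
}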
@@ -94,17 +104,27 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       /*
+        * page based offset in vm_pgoff could be sufficiently large to
+        * overflow a (l)off_t when converted to byte offset.
+        */
+       if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+               return -EINVAL;
+
+       /* must be huge page aligned */
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;
 
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+       /* check for overflow */
+       if (len < vma_len)
+               return -EINVAL;
 
        mutex_lock(&inode->i_mutex);
        file_accessed(file);
 
        ret = -ENOMEM;
-       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
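The new len < vma_len test in the hunk above is the usual "the sum wrapped past the top" idiom; it is reliable for the signed loff_t sum because kernels of this vintage are built with wrapping overflow semantics (-fno-strict-overflow). A minimal standalone sketch of the same idiom, written with unsigned 64-bit arithmetic so the wraparound is well defined in ordinary C:

#include <stdint.h>
#include <stdio.h>

/* "Did base + add wrap past the top of the type?" -- the same shape as the
 * len < vma_len check, but on uint64_t so the wrap is defined here. */
static int add_overflows(uint64_t base, uint64_t add, uint64_t *sum)
{
        *sum = base + add;
        return *sum < base;
}

int main(void)
{
        uint64_t sum;

        /* Fits: 2^63 + 2^62 is still below 2^64. */
        printf("%d\n", add_overflows(1ULL << 63, 1ULL << 62, &sum));
        /* Wraps: 2^63 + 2^63 == 2^64, which truncates to 0. */
        printf("%d\n", add_overflows(1ULL << 63, 1ULL << 63, &sum));
        return 0;
}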
@@ -114,7 +134,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        ret = 0;
        hugetlb_prefault_arch_hook(vma->vm_mm);
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-               inode->i_size = len;
+               i_size_write(inode, len);
 out:
        mutex_unlock(&inode->i_mutex);
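The change from a plain i_size store to i_size_write() matters mainly on 32-bit SMP and preempt builds, where a 64-bit i_size load can tear; i_size_write() wraps the store in a seqcount (with the writer serialized here by the i_mutex taken earlier in the function) and i_size_read() retries until it observes a stable value. A rough standalone sketch of that seqcount pattern follows; it omits the memory barriers the real helpers use, and all names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct sketch_inode {
        volatile unsigned int seq;   /* even = stable, odd = write in progress */
        volatile uint64_t size;
};

static void sketch_size_write(struct sketch_inode *si, uint64_t v)
{
        si->seq++;                   /* writers serialized by the caller (i_mutex) */
        si->size = v;
        si->seq++;
}

static uint64_t sketch_size_read(const struct sketch_inode *si)
{
        unsigned int s;
        uint64_t v;

        do {
                s = si->seq;
                v = si->size;
        } while ((s & 1) || s != si->seq);   /* retry if a write raced with us */
        return v;
}

int main(void)
{
        struct sketch_inode si = { 0, 0 };

        sketch_size_write(&si, 5ULL << 32);
        printf("%llu\n", (unsigned long long)sketch_size_read(&si));
        return 0;
}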
 
@@ -150,7 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -176,7 +196,7 @@ full_search:
                        return -ENOMEM;
                }
 
-               if (!vma || addr + len <= vma->vm_start)
+               if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
@@ -422,7 +442,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        BUG_ON(!inode);
 
-       error = inode_change_ok(inode, attr);
+       error = setattr_prepare(dentry, attr);
        if (error)
                return error;
 
@@ -569,7 +589,8 @@ static int hugetlbfs_set_page_dirty(struct page *page)
 }
 
 static int hugetlbfs_migrate_page(struct address_space *mapping,
-                               struct page *newpage, struct page *page)
+                               struct page *newpage, struct page *page,
+                               enum migrate_mode mode)
 {
        int rc;
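The added enum migrate_mode argument tells the migration callback how hard it may block. Roughly, as introduced upstream around v3.3 in include/linux/migrate_mode.h (the comments here are paraphrased, not copied):

enum migrate_mode {
        MIGRATE_ASYNC,          /* never block; back off if migration would sleep */
        MIGRATE_SYNC_LIGHT,     /* may block, but skip the most expensive waits */
        MIGRATE_SYNC,           /* may block on everything, including writeback */
};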
 
@@ -978,6 +999,11 @@ static int __init init_hugetlbfs_fs(void)
        int error;
        struct vfsmount *vfsmount;
 
+       if (!hugepages_supported()) {
+               pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n");
+               return -ENOTSUPP;
+       }
+
        error = bdi_init(&hugetlbfs_backing_dev_info);
        if (error)
                return error;
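hugepages_supported() lets init_hugetlbfs_fs() refuse to register the filesystem when no huge page sizes exist; on most architectures it reduces to checking that a huge page size was configured at boot. The same condition can be probed from user space, roughly, by looking for a Hugepagesize line in /proc/meminfo, as in this hedged sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "Hugepagesize:", 13)) {
                        printf("huge pages supported: %s", line);
                        fclose(f);
                        return 0;
                }
        }
        fclose(f);
        printf("no hugepage size reported by this kernel\n");
        return 0;
}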