Merge branch 'stable-3.2' into pandora-3.2
author Grazvydas Ignotas <notasas@gmail.com>
Sun, 14 Sep 2014 15:35:36 +0000 (18:35 +0300)
committer Grazvydas Ignotas <notasas@gmail.com>
Sun, 14 Sep 2014 15:35:36 +0000 (18:35 +0300)
Conflicts:
mm/truncate.c

arch/arm/Kconfig
arch/arm/kernel/entry-header.S
arch/x86/Kconfig
fs/debugfs/inode.c
fs/namespace.c
kernel/trace/trace.c
mm/hugetlb.c
mm/page_alloc.c
mm/shmem.c
mm/vmalloc.c

diff --cc arch/arm/Kconfig
@@@ -1,10 -1,9 +1,11 @@@
  config ARM
        bool
        default y
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select HAVE_DMA_API_DEBUG
        select HAVE_IDE if PCI || ISA || PCMCIA
 +      select HAVE_DMA_ATTRS
 +      select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
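Note: ARCH_SUPPORTS_ATOMIC_RMW (the first-column '+' line, i.e. the change coming in from stable-3.2) is an opt-in consumed by kernel/Kconfig.locks, which keeps mutex spin-on-owner optimism only on architectures whose atomic RMW operations are suitable for it; the second-column ' +' lines are pandora-3.2-local selects. A rough sketch of the consumer, assumed from the upstream fix rather than taken from this diff:

# Sketch (assumed, not part of this commit): how kernel/Kconfig.locks
# consumes the new symbol in this stable series.
config ARCH_SUPPORTS_ATOMIC_RMW
	bool

config MUTEX_SPIN_ON_OWNER
	def_bool SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW

Architectures that do not select the symbol simply keep plain sleeping mutexes without the owner-spinning fast path.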
diff --cc arch/arm/kernel/entry-header.S
Simple merge
diff --cc arch/x86/Kconfig
@@@ -73,9 -73,9 +73,10 @@@ config X8
        select IRQ_FORCED_THREADING
        select USE_GENERIC_SMP_HELPERS if SMP
        select HAVE_BPF_JIT if (X86_64 && NET)
 +      select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select CLKEVT_I8253
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
+       select ARCH_SUPPORTS_ATOMIC_RMW
  
  config INSTRUCTION_DECODER
        def_bool (KPROBES || PERF_EVENTS)
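Here the second-column ' +' select of HAVE_ARCH_TRANSPARENT_HUGEPAGE is pandora-3.2-local and mirrors the later mainline scheme in which mm/Kconfig keys THP off an architecture opt-in instead of a hard-coded arch list, while ARCH_SUPPORTS_ATOMIC_RMW again comes in from stable-3.2 as in the ARM hunk. Assuming that THP backport, the consumer looks roughly like:

# Sketch (assumed backport, not part of this commit): mm/Kconfig gating
# THP on the architecture opt-in.
config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select COMPACTION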
diff --cc fs/debugfs/inode.c
Simple merge
diff --cc fs/namespace.c
Simple merge
diff --cc kernel/trace/trace.c
Simple merge
diff --cc mm/hugetlb.c
Simple merge
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/shmem.c
@@@ -1071,6 -1145,47 +1145,48 @@@ static int shmem_fault(struct vm_area_s
        return ret;
  }
  
+ int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ {
+       /*
+        * If the underlying filesystem is not going to provide
+        * a way to truncate a range of blocks (punch a hole) -
+        * we should return failure right now.
+        * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
+        */
+       if (inode->i_op->truncate_range != shmem_truncate_range)
+               return -ENOSYS;
+       mutex_lock(&inode->i_mutex);
+       {
+               struct shmem_falloc shmem_falloc;
+               struct address_space *mapping = inode->i_mapping;
+               loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+               loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+               shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
+               if ((u64)unmap_end > (u64)unmap_start)
+                       unmap_mapping_range(mapping, unmap_start,
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, lstart, lend);
+               /* No need to unmap again: hole-punching leaves COWed pages */
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
+       }
+       mutex_unlock(&inode->i_mutex);
+       return 0;
+ }
++EXPORT_SYMBOL_GPL(vmtruncate_range);
  #ifdef CONFIG_NUMA
  static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
  {
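As a usage note for the vmtruncate_range() hole-punch path shown above: on a 3.2 kernel, madvise(MADV_REMOVE) on a shared tmpfs mapping is the userspace route that ends up here (mm/madvise.c still calls vmtruncate_range() in this series). Below is a minimal sketch of such a caller; the program, the /dev/shm/hole-test path and the sizes are illustrative assumptions, not part of this commit:

/*
 * Hypothetical illustration, not part of this commit: punch a hole in a
 * tmpfs file with madvise(MADV_REMOVE); on a 3.2 kernel this is expected
 * to reach vmtruncate_range()/shmem_truncate_range() as shown above.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/shm/hole-test", O_CREAT | O_RDWR | O_TRUNC, 0600);

	if (fd < 0 || ftruncate(fd, 4 * pg) < 0) {
		perror("setup");
		return 1;
	}

	char *map = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(map, 0xaa, 4 * pg);	/* instantiate backing pages */

	/* Drop the middle two pages; tmpfs releases their backing store. */
	if (madvise(map + pg, 2 * pg, MADV_REMOVE) < 0)
		perror("madvise(MADV_REMOVE)");

	munmap(map, 4 * pg);
	close(fd);
	unlink("/dev/shm/hole-test");
	return 0;
}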
diff --cc mm/vmalloc.c
Simple merge