git.openpandora.org / pandora-kernel.git / blobdiff
Merge branch 'for-usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah...
diff --git a/mm/mmap.c b/mm/mmap.c
index 26efbfc..bbdc9af 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -960,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 {
 	struct mm_struct * mm = current->mm;
 	struct inode *inode;
-	unsigned int vm_flags;
+	vm_flags_t vm_flags;
 	int error;
 	unsigned long reqprot = prot;
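This hunk is part of switching local copies of vma->vm_flags from "unsigned int" to the wider vm_flags_t. In mainline this typedef is an unsigned long (include/linux/mm_types.h), so on a 64-bit kernel flag bits above bit 31 survive the copy. A minimal userspace sketch of the truncation the wider type avoids, assuming an LP64 build; VM_FAKE_HIGH is a hypothetical flag, not a real kernel one:

#include <stdio.h>

typedef unsigned long vm_flags_t;	/* matches mainline's typedef */

#define VM_FAKE_HIGH	(1UL << 32)	/* hypothetical flag above bit 31 */

int main(void)
{
	vm_flags_t wide = VM_FAKE_HIGH;
	unsigned int narrow = wide;	/* silently truncates on LP64 */

	printf("wide keeps the flag:   %d\n", (wide & VM_FAKE_HIGH) != 0);
	printf("narrow keeps the flag: %d\n",
	       (((vm_flags_t)narrow) & VM_FAKE_HIGH) != 0);
	return 0;
}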
@@ -1165,7 +1165,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
  */
 int vma_wants_writenotify(struct vm_area_struct *vma)
 {
-	unsigned int vm_flags = vma->vm_flags;
+	vm_flags_t vm_flags = vma->vm_flags;
 
 	/* If it was private or non-writable, the write bit is already clear */
 	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
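The mask-and-compare idiom in vma_wants_writenotify() above is true only when both VM_WRITE and VM_SHARED are set, i.e. write notification is only ever considered for shared writable mappings. A standalone check of the idiom; the flag values mirror the kernel's historical definitions but should be treated as illustrative:

#include <stdio.h>

#define VM_WRITE	0x00000002UL
#define VM_SHARED	0x00000008UL

int main(void)
{
	unsigned long private_rw = VM_WRITE;
	unsigned long shared_rw  = VM_WRITE | VM_SHARED;

	/* True only when BOTH bits are present. */
	printf("%d\n", (private_rw & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)); /* 0 */
	printf("%d\n", (shared_rw  & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)); /* 1 */
	return 0;
}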
@@ -1193,7 +1193,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
  * We account for memory if it's a private writeable mapping,
  * not hugepages and VM_NORESERVE wasn't set.
  */
-static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
+static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 {
 	/*
 	 * hugetlb has its own accounting separate from the core VM
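The comment in this hunk summarizes accountable_mapping(): a mapping is charged against the commit limit only if it is private, writable, not hugetlbfs (which keeps its own accounting), and VM_NORESERVE was not requested. A userspace model of that predicate, assuming the function body of this era; the hugetlbfs check is elided and the flag values, while mirroring the kernel's, are illustrative here:

#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_WRITE	0x00000002UL
#define VM_SHARED	0x00000008UL
#define VM_NORESERVE	0x00200000UL

/* Accounted iff writable and neither shared nor VM_NORESERVE. */
static int accountable(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
	printf("%d\n", accountable(VM_WRITE));			/* 1: private writable */
	printf("%d\n", accountable(VM_WRITE | VM_SHARED));	/* 0: shared */
	printf("%d\n", accountable(VM_WRITE | VM_NORESERVE));	/* 0: caller opted out */
	return 0;
}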
@@ -1207,7 +1207,7 @@ static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			  unsigned long len, unsigned long flags,
-			  unsigned int vm_flags, unsigned long pgoff)
+			  vm_flags_t vm_flags, unsigned long pgoff)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
@@ -2502,15 +2502,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
+		mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
-		 * anon_vma->root->lock. If some other vma in this mm shares
+		 * anon_vma->root->mutex. If some other vma in this mm shares
 		 * the same anon_vma we won't take it again.
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us thanks to the
-		 * anon_vma->root->lock.
+		 * anon_vma->root->mutex.
 		 */
 		if (__test_and_set_bit(0, (unsigned long *)
 				       &anon_vma->root->head.next))
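The second half of this hunk (unchanged by the lock conversion) relies on a pointer-tagging trick: list heads are pointer-aligned, so bit 0 of head.next is normally zero and can be borrowed as an "already locked by mm_take_all_locks()" marker. A standalone model of the idea; the names are illustrative, not kernel API:

#include <stdio.h>

struct node { struct node *next, *prev; };

/* Set bit 0 of head->next as a "visited" tag and return the old value,
 * mirroring what __test_and_set_bit() does on the kernel's list head. */
static int mark(struct node *head)
{
	unsigned long v = (unsigned long)head->next;

	if (v & 1UL)
		return 1;		/* already marked */
	head->next = (struct node *)(v | 1UL);
	return 0;
}

int main(void)
{
	struct node h = { &h, &h };

	printf("%d %d\n", mark(&h), mark(&h));	/* 0 1: second take is skipped */
	return 0;
}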
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
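The comment rewritten here explains why mm_take_all_locks() may grab many same-class locks in arbitrary order: every path that takes more than one of them first holds mmap_sem, and mutex_lock_nest_lock() (used in the hunk above) is the lockdep annotation expressing exactly that. A compilable pthread model of the serialization property itself, with the outer mutex playing the role of mmap_sem; build with cc -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner[2] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Opposite inner-lock order per thread is a classic ABBA deadlock,
 * but the outer lock serializes the whole sequence, so it cannot
 * trigger: the property the kernel comment above relies on. */
static void *worker(void *arg)
{
	int first = (int)(long)arg;

	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner[first]);
	pthread_mutex_lock(&inner[!first]);
	pthread_mutex_unlock(&inner[!first]);
	pthread_mutex_unlock(&inner[first]);
	pthread_mutex_unlock(&outer);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	pthread_create(&t[0], NULL, worker, (void *)0L);
	pthread_create(&t[1], NULL, worker, (void *)1L);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	puts("no deadlock");
	return 0;
}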
@@ -2615,7 +2615,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us until we release the
-		 * anon_vma->root->lock.
+		 * anon_vma->root->mutex.
 		 */
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->root->head.next))