git.openpandora.org
/
pandora-kernel.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge branch 'topic/asoc' into for-linus
[pandora-kernel.git]
/
mm
/
hugetlb.c
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index
28c655b
..
e83ad2c
100644
(file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
return (struct resv_map *)(get_vma_private_data(vma) &
~HPAGE_RESV_MASK);
return NULL;
return (struct resv_map *)(get_vma_private_data(vma) &
~HPAGE_RESV_MASK);
return NULL;
@@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
set_vma_private_data(vma, (get_vma_private_data(vma) &
HPAGE_RESV_MASK) | (unsigned long)map);
set_vma_private_data(vma, (get_vma_private_data(vma) &
HPAGE_RESV_MASK) | (unsigned long)map);
@@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}
@@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
if (vma->vm_flags & VM_NORESERVE)
return;
if (vma->vm_flags & VM_NORESERVE)
return;
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
/* Shared mappings always use reserves */
h->resv_huge_pages--;
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
/* Shared mappings always use reserves */
h->resv_huge_pages--;
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
-	if (vma->vm_flags & VM_SHARED)
+	if (vma->vm_flags & VM_MAYSHARE)
return 1;
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
return 1;
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
@@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h,
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
return region_chg(&inode->i_mapping->private_list,
idx, idx + 1);
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
return region_chg(&inode->i_mapping->private_list,
idx, idx + 1);
@@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
region_add(&inode->i_mapping->private_list, idx, idx + 1);
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
region_add(&inode->i_mapping->private_list, idx, idx + 1);
@@ -1893,7 +1893,7 @@ retry_avoidcopy:
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
-	if (!(vma->vm_flags & VM_SHARED) &&
+	if (!(vma->vm_flags & VM_MAYSHARE) &&
is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
@@ -2000,7 +2000,7 @@ retry:
clear_huge_page(page, address, huge_page_size(h));
__SetPageUptodate(page);
clear_huge_page(page, address, huge_page_size(h));
__SetPageUptodate(page);
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
int err;
struct inode *inode = mapping->host;
int err;
struct inode *inode = mapping->host;
@@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_mutex;
}
goto out_mutex;
}
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
pagecache_page = hugetlbfs_pagecache_page(h,
vma, address);
}
pagecache_page = hugetlbfs_pagecache_page(h,
vma, address);
}
@@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
@@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
}
region_add(&inode->i_mapping->private_list, from, to);
return 0;
}