* If pagelist != NULL then isolate pages from the LRU and
* put them on the pagelist.
*/
-static struct vm_area_struct *
+static int
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
const nodemask_t *nodes, unsigned long flags, void *private)
{
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
-
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	int err = 0;
+	struct vm_area_struct *vma, *prev;

+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
prev = NULL;
- for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+ for (; vma && vma->vm_start < end; vma = vma->vm_next) {
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
if (!vma->vm_next && vma->vm_end < end)
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
if (prev && prev->vm_end < vma->vm_start)
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
					flags, private);
- if (err) {
- first = ERR_PTR(err);
+ if (err)
break;
- }
}
prev = vma;
}
- return first;
+ return err;
}
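
For readers less familiar with the convention being dropped above: ERR_PTR() encodes a negative errno in the returned pointer, and every caller must unwrap it with IS_ERR()/PTR_ERR() before use. A minimal self-contained sketch of that pattern (hypothetical userspace mirrors of include/linux/err.h, not part of the patch):

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define EFAULT		14

	/* Hypothetical userspace mirrors of the kernel's err.h helpers. */
	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *vma = ERR_PTR(-EFAULT);	/* what check_range() returned */

		if (IS_ERR(vma))	/* forgetting this check is the classic bug */
			printf("error: %ld\n", PTR_ERR(vma));
		return 0;
	}

Returning a plain int removes that unwrapping step entirely, which is what the hunk above does.
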
/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
nodemask_t nmask;
LIST_HEAD(pagelist);
int err = 0;
- struct vm_area_struct *vma;
nodes_clear(nmask);
node_set(source, nmask);
- vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ /*
+ * This does not "check" the range but isolates all pages that
+ * need migration. Between passing in the full user address
+ * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
+ */
+ VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
+ check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
							false, true);
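
For orientation, migrate_to_node() is the per-source-node worker behind the migrate_pages(2) syscall. A hedged userspace sketch of exercising this path (assumes libnuma's <numaif.h>, link with -lnuma, and that nodes 0 and 1 exist):

	#include <numaif.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long old_nodes = 1UL << 0;	/* move pages off node 0 */
		unsigned long new_nodes = 1UL << 1;	/* ...onto node 1 */

		/* Roughly, each source node set here becomes one
		 * migrate_to_node() call in the kernel. */
		if (migrate_pages(getpid(), 8 * sizeof(unsigned long) + 1,
				  &old_nodes, &new_nodes) < 0)
			perror("migrate_pages");
		return 0;
	}
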
/*
* Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
* Search forward from there, if not. N.B., this assumes that the
* list of pages handed to migrate_pages()--which is how we get here--
* is in virtual address order.
*/
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
{
- struct vm_area_struct *vma = (struct vm_area_struct *)private;
+ struct vm_area_struct *vma;
unsigned long uninitialized_var(address);

+	vma = find_vma(current->mm, start);
while (vma) {
address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
{
return NULL;
}
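
Why pass @start instead of the vma pointer itself: mbind_range() may merge or split vmas after the pages were queued, so a pointer captured earlier can dangle, while an address can always be re-resolved with find_vma() (this is the crash the rename above fixes). A toy self-contained illustration of pointer-vs-key stability (hypothetical code, not from the kernel):

	#include <stdio.h>
	#include <stdlib.h>

	struct range { unsigned long start, end; };

	/* Re-resolving by key: the analogue of find_vma(current->mm, start). */
	static struct range *find_range(struct range *tbl, size_t n, unsigned long a)
	{
		for (size_t i = 0; i < n; i++)
			if (a >= tbl[i].start && a < tbl[i].end)
				return &tbl[i];
		return NULL;
	}

	int main(void)
	{
		struct range *tbl = malloc(2 * sizeof(*tbl));
		tbl[0] = (struct range){ 0x1000, 0x2000 };
		tbl[1] = (struct range){ 0x2000, 0x3000 };
		struct range *cached = &tbl[1];	/* like caching a vma pointer */

		/* "Merge" the two ranges, as mbind_range() may merge vmas. */
		tbl[0].end = 0x3000;
		tbl = realloc(tbl, sizeof(*tbl));
		(void)cached;	/* may now dangle; re-resolve by key instead */

		struct range *fresh = find_range(tbl, 1, 0x2800);
		printf("[%#lx, %#lx)\n", fresh->start, fresh->end);
		free(tbl);
		return 0;
	}
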
static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
nodemask_t *nmask, unsigned long flags)
{
- struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
struct mempolicy *new;
unsigned long end;
if (err)
goto mpol_out;
- vma = check_range(mm, start, end, nmask,
+ err = check_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
-
- err = PTR_ERR(vma);
- if (!IS_ERR(vma)) {
+ if (!err) {
int nr_failed = 0;
err = mbind_range(mm, start, end, new);
if (!list_empty(&pagelist)) {
- nr_failed = migrate_pages(&pagelist, new_vma_page,
- (unsigned long)vma,
- false, true);
+ nr_failed = migrate_pages(&pagelist, new_page,
+ start, false, true);
if (nr_failed)
putback_lru_pages(&pagelist);
}
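
do_mbind() is the backend of mbind(2); with MPOL_MF_MOVE it queues the misplaced pages and hands them to migrate_pages() together with the new_page() callback patched above. A hedged userspace sketch that exercises this path (assumes libnuma's <numaif.h>, link with -lnuma, and a node 0):

	#include <numaif.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 1 << 20;
		unsigned long nodemask = 1UL << 0;	/* bind to node 0 */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		/* MPOL_MF_MOVE asks do_mbind() to migrate pages already
		 * faulted in on other nodes. */
		if (mbind(p, len, MPOL_BIND, &nodemask,
			  8 * sizeof(nodemask) + 1, MPOL_MF_MOVE | MPOL_MF_STRICT))
			perror("mbind");
		return 0;
	}
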
 * task can change its policy. The system default policy requires no
* such protection.
*/
-unsigned slab_node(struct mempolicy *policy)
+unsigned slab_node(void)
{
+ struct mempolicy *policy;
+
+ if (in_interrupt())
+ return numa_node_id();
+
+ policy = current->mempolicy;
if (!policy || policy->flags & MPOL_F_LOCAL)
return numa_node_id();
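
The reason slab_node() loses its argument: callers in the slab allocators used to pass current->mempolicy themselves, and dereferencing that from interrupt context is unsafe since an interrupt can fire while any task is current. The call-site effect, sketched from memory (the real sites are in mm/slab.c and mm/slub.c):

	/* before: every caller read current->mempolicy, even in interrupts */
	nid = slab_node(current->mempolicy);

	/* after: the policy lookup moves inside slab_node(), behind the
	 * in_interrupt() bailout added above */
	nid = slab_node();
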
} else
*new = *old;
- rcu_read_lock();
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
		else
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
}
- rcu_read_unlock();
atomic_set(&new->refcnt, 1);
return new;
}