+	int ret;
+	struct mem_cgroup *mem = mc.to;
+
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, NULL);
+	if (ret || !mem)
+		return -ENOMEM;
+
+	mc.precharge++;
+	return ret;
+}
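+
+/*
+ * Expected call pattern for the helper above (an illustrative sketch;
+ * the caller and the name of the counting helper are assumptions, not
+ * part of this hunk): count the candidate ptes first, then take one
+ * charge against mc.to per page to be moved, e.g.
+ *
+ *	precharge = mem_cgroup_count_precharge(mm);
+ *	while (precharge-- && !ret)
+ *		ret = mem_cgroup_do_precharge();
+ *
+ * so that mc.precharge ends up holding one pre-taken charge per page.
+ */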
+
+/**
+ * is_target_pte_for_mc - check whether a pte is a valid target for move charge
+ * @vma: the vma the pte to be checked belongs to
+ * @addr: the address corresponding to the pte to be checked
+ * @ptent: the pte to be checked
+ * @target: the pointer where the target page will be stored (can be NULL)
+ *
+ * Returns
+ *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
+ *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
+ *     move charge. If @target is not NULL, the page is stored in target->page
+ *     with an extra refcount taken (the caller must handle it).
+ *
+ * Called with the pte lock held.
+ */
+/* We add a new member later. */
+union mc_target {
+	struct page	*page;
+};
+
+/* We add a new type later. */
+enum mc_target_type {
+	MC_TARGET_NONE,	/* not used */
+	MC_TARGET_PAGE,
+};
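+
+/*
+ * Sketch of the calling convention documented above (illustrative only,
+ * not a caller in this patch): with a non-NULL @target the helper hands
+ * back a referenced page, which the caller must eventually drop:
+ *
+ *	union mc_target target;
+ *
+ *	if (is_target_pte_for_mc(vma, addr, *pte, &target) ==
+ *	    MC_TARGET_PAGE) {
+ *		... move the charge for target.page ...
+ *		put_page(target.page);
+ *	}
+ */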
+
+static int is_target_pte_for_mc(struct vm_area_struct *vma,
+		unsigned long addr, pte_t ptent, union mc_target *target)
+{
+	struct page *page;
+	struct page_cgroup *pc;
+	int ret = 0;
+	bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
+					&mc.to->move_charge_at_immigrate);
+
+	if (!pte_present(ptent))
+		return 0;
+
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page || !page_mapped(page))
+		return 0;
+	/*
+	 * TODO: We don't move charges of file (including shmem/tmpfs)
+	 * pages for now.
+	 */
+	if (!move_anon || !PageAnon(page))
+		return 0;
+	/*
+	 * TODO: We don't move charges of shared pages (used by multiple
+	 * processes) for now.
+	 */
+	if (page_mapcount(page) > 1)
+		return 0;
+	if (!get_page_unless_zero(page))
+		return 0;
+
+	pc = lookup_page_cgroup(page);
+	/*
+	 * Do only a loose check here, without taking the page_cgroup lock;
+	 * mem_cgroup_move_account() re-checks that the pc is valid under
+	 * the lock.
+	 */
+	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+		ret = MC_TARGET_PAGE;
+		if (target)
+			target->page = page;
+	}
+	/* drop the reference unless the caller takes ownership of the page */
+	if (!ret || !target)
+		put_page(page);
+
+	return ret;
+}
+
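+/*
+ * pmd_entry callback for the precharge-counting page-table walk: bump
+ * mc.precharge once for each pte that is a move-charge target (@target
+ * is passed as NULL, so no page reference is kept).
+ */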
+static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
+					unsigned long addr, unsigned long end,
+					struct mm_walk *walk)
+{
+	struct vm_area_struct *vma = walk->private;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE)
+		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
+			mc.precharge++;	/* increment precharge temporarily */
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
+