diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e41a6c2..cf7d027 100644
@@ -94,6 +94,8 @@ enum mem_cgroup_events_index {
        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
        MEM_CGROUP_EVENTS_COUNT,        /* # of pages paged in/out */
+       MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
+       MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
        MEM_CGROUP_EVENTS_NSTATS,
 };
 /*
@@ -231,6 +233,11 @@ struct mem_cgroup {
         * reclaimed from.
         */
        int last_scanned_child;
+       int last_scanned_node;
+#if MAX_NUMNODES > 1
+       nodemask_t      scan_nodes;
+       unsigned long   next_scan_node_update;
+#endif
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
@@ -352,7 +359,7 @@ enum charge_type {
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(void);
+static void drain_all_stock_async(struct mem_cgroup *mem);
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -585,6 +592,16 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
        this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 }
 
+void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
+{
+       this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
+}
+
+void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
+{
+       this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
+}
+
 static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
                                            enum mem_cgroup_events_index idx)
 {
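
The two helpers above only ever touch the local cpu's slot via this_cpu_add();
the read side, mem_cgroup_read_events() (whose signature appears in the
context above), pays the cost of summing the per-cpu deltas instead. A
hypothetical userspace model of that split (names invented for illustration):

	#define NCPU 4
	static unsigned long events[NCPU];	/* one slot per cpu */

	/* writer: lock-free, touches only its own slot */
	static void add_event(int cpu, unsigned long val)
	{
		events[cpu] += val;
	}

	/* reader: sums all slots at read time */
	static unsigned long read_events(void)
	{
		unsigned long sum = 0;
		for (int cpu = 0; cpu < NCPU; cpu++)
			sum += events[cpu];
		return sum;
	}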
@@ -624,18 +641,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
        preempt_enable();
 }
 
+static unsigned long
+mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx)
+{
+       struct mem_cgroup_per_zone *mz;
+       u64 total = 0;
+       int zid;
+
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               mz = mem_cgroup_zoneinfo(mem, nid, zid);
+               total += MEM_CGROUP_ZSTAT(mz, idx);
+       }
+       return total;
+}
 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
 {
-       int nid, zid;
-       struct mem_cgroup_per_zone *mz;
+       int nid;
        u64 total = 0;
 
        for_each_online_node(nid)
-               for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-                       mz = mem_cgroup_zoneinfo(mem, nid, zid);
-                       total += MEM_CGROUP_ZSTAT(mz, idx);
-               }
+               total += mem_cgroup_get_zonestat_node(mem, nid, idx);
        return total;
 }
 
@@ -709,7 +735,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
                                struct mem_cgroup, css);
 }
 
-static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
        struct mem_cgroup *mem = NULL;
 
@@ -813,6 +839,33 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
        return (mem == root_mem_cgroup);
 }
 
+void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+{
+       struct mem_cgroup *mem;
+
+       if (!mm)
+               return;
+
+       rcu_read_lock();
+       mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (unlikely(!mem))
+               goto out;
+
+       switch (idx) {
+       case PGMAJFAULT:
+               mem_cgroup_pgmajfault(mem, 1);
+               break;
+       case PGFAULT:
+               mem_cgroup_pgfault(mem, 1);
+               break;
+       default:
+               BUG();
+       }
+out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(mem_cgroup_count_vm_event);
+
 /*
  * Following LRU functions are allowed to be used without PCG_LOCK.
  * Operations are called by routine of global LRU independently from memcg.
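
mem_cgroup_count_vm_event() above is exported for the fault paths; the actual
hook-up into mm/memory.c and mm/filemap.c lands in a companion change, not in
this file. A minimal sketch of such a call site - the function name and
structure here are hypothetical, only the two counting calls come from the
real interfaces:

	/* bump the global vmstat counter as before, then attribute
	 * the fault to the memcg owning this mm */
	static void account_fault(struct mm_struct *mm, bool major)
	{
		if (major) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(mm, PGMAJFAULT);
		} else {
			count_vm_event(PGFAULT);
			mem_cgroup_count_vm_event(mm, PGFAULT);
		}
	}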
@@ -1064,9 +1117,9 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
        return (active > inactive);
 }
 
-unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
-                                      struct zone *zone,
-                                      enum lru_list lru)
+unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
+                                               struct zone *zone,
+                                               enum lru_list lru)
 {
        int nid = zone_to_nid(zone);
        int zid = zone_idx(zone);
@@ -1075,6 +1128,93 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
        return MEM_CGROUP_ZSTAT(mz, lru);
 }
 
+#ifdef CONFIG_NUMA
+static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       unsigned long ret;
+
+       ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) +
+               mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE);
+
+       return ret;
+}
+
+static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_file_lru_pages(memcg, nid);
+
+       return total;
+}
+
+static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       unsigned long ret;
+
+       ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
+               mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
+
+       return ret;
+}
+
+static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid);
+
+       return total;
+}
+
+static unsigned long
+mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid)
+{
+       return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE);
+}
+
+static unsigned long
+mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid);
+
+       return total;
+}
+
+static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       enum lru_list l;
+       u64 total = 0;
+
+       for_each_lru(l)
+               total += mem_cgroup_get_zonestat_node(memcg, nid, l);
+
+       return total;
+}
+
+static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_lru_pages(memcg, nid);
+
+       return total;
+}
+#endif /* CONFIG_NUMA */
+
 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone)
 {
@@ -1418,6 +1558,81 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
        return ret;
 }
 
+#if MAX_NUMNODES > 1
+
+/*
+ * Updating the nodemask on every call would be wasteful - even with an empty
+ * or stale mask we can still start from some node and traverse all nodes via
+ * the zonelist. So refresh the mask loosely, at most once per 10 seconds.
+ */
+static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
+{
+       int nid;
+
+       if (time_after(mem->next_scan_node_update, jiffies))
+               return;
+
+       mem->next_scan_node_update = jiffies + 10*HZ;
+       /* make a nodemask where this memcg uses memory from */
+       mem->scan_nodes = node_states[N_HIGH_MEMORY];
+
+       for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
+
+               if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
+                   mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
+                       continue;
+
+               if (total_swap_pages &&
+                   (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
+                    mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
+                       continue;
+               node_clear(nid, mem->scan_nodes);
+       }
+}
+
+/*
+ * Select a node to start reclaim from. Because all we need is to reduce
+ * the usage counter, starting from any node is fine. Still, reclaiming
+ * from the current node has both pros and cons.
+ *
+ * Freeing memory from the current node means freeing memory from a node
+ * which we'll use or have used recently; that may hurt LRU ordering, and
+ * if several threads hit their limits they will contend on one node. But
+ * freeing from a remote node costs more reclaim time because of memory
+ * latency.
+ *
+ * For now, we use round-robin. A better algorithm is welcome.
+ */
+int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
+{
+       int node;
+
+       mem_cgroup_may_update_nodemask(mem);
+       node = mem->last_scanned_node;
+
+       node = next_node(node, mem->scan_nodes);
+       if (node == MAX_NUMNODES)
+               node = first_node(mem->scan_nodes);
+       /*
+        * We call this when we hit the limit, not when pages are added to
+        * the LRU. The LRUs may hold no pages because everything is
+        * UNEVICTABLE, or because the memcg is too small for anything to
+        * have reached an LRU yet. In that case, we use the current node.
+        */
+       if (unlikely(node == MAX_NUMNODES))
+               node = numa_node_id();
+
+       mem->last_scanned_node = node;
+       return node;
+}
+
+#else
+int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
+{
+       return 0;
+}
+#endif
+
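
The two MAX_NUMNODES checks in mem_cgroup_select_victim_node() are easy to
misread: next_node() returns MAX_NUMNODES once it runs past the last set bit,
and first_node() returns MAX_NUMNODES again when the mask is empty. A
standalone userspace model of the same round-robin (entirely a hypothetical
demo; the bitmask stands in for scan_nodes):

	#include <stdio.h>

	#define NODES 8			/* stand-in for MAX_NUMNODES */

	/* next set bit after 'last', wrapping around once;
	 * -1 models the fall-back to numa_node_id() */
	static int pick_next(unsigned int mask, int last)
	{
		for (int n = last + 1; n < NODES; n++)	/* next_node() */
			if (mask & (1u << n))
				return n;
		for (int n = 0; n < NODES; n++)		/* first_node() */
			if (mask & (1u << n))
				return n;
		return -1;			/* mask was empty */
	}

	int main(void)
	{
		int node = 2;
		for (int i = 0; i < 5; i++) {
			node = pick_next(0x16, node);	/* nodes 1, 2, 4 */
			printf("reclaim from node %d\n", node);
		}
		return 0;			/* prints 4 1 2 4 1 */
	}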
 /*
  * Scan the hierarchy if needed to reclaim memory. We remember the last child
  * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1448,15 +1663,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
        /* If memsw_is_minimum==1, swap-out is of-no-use. */
-       if (root_mem->memsw_is_minimum)
+       if (!check_soft && root_mem->memsw_is_minimum)
                noswap = true;
 
        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
                        loop++;
-                       if (loop >= 1)
-                               drain_all_stock_async();
+                       /*
+                        * Skip draining per-cpu cached charges during
+                        * soft limit reclaim: global reclaim only tries
+                        * to free some memory, and returning cached
+                        * charges to the res_counter frees none.
+                        */
+                       if (!check_soft && loop >= 1)
+                               drain_all_stock_async(root_mem);
                        if (loop >= 2) {
                                /*
                                 * If we have not been able to reclaim
@@ -1507,7 +1728,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        if (!res_counter_soft_limit_excess(&root_mem->res))
                                return total;
                } else if (mem_cgroup_margin(root_mem))
-                       return 1 + total;
+                       return total;
        }
        return total;
 }
@@ -1719,9 +1940,11 @@ struct memcg_stock_pcp {
        struct mem_cgroup *cached; /* this never be root cgroup */
        unsigned int nr_pages;
        struct work_struct work;
+       unsigned long flags;
+#define FLUSHING_CACHED_CHARGE (0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static atomic_t memcg_drain_count;
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1769,6 +1992,7 @@ static void drain_local_stock(struct work_struct *dummy)
 {
        struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
        drain_stock(stock);
+       clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
 /*
@@ -1793,26 +2017,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
  * expects some charges will be back to res_counter later but cannot wait for
  * it.
  */
-static void drain_all_stock_async(void)
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
-       int cpu;
-       /* This function is for scheduling "drain" in asynchronous way.
-        * The result of "drain" is not directly handled by callers. Then,
-        * if someone is calling drain, we don't have to call drain more.
-        * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
-        * there is a race. We just do loose check here.
+       int cpu, curcpu;
+       /*
+        * If a drain is already in flight, avoid queuing more kworker runs.
+        */
-       if (atomic_read(&memcg_drain_count))
+       if (!mutex_trylock(&percpu_charge_mutex))
                return;
        /* Notify other cpus that system-wide "drain" is running */
-       atomic_inc(&memcg_drain_count);
        get_online_cpus();
+       /*
+        * Get a hint for avoiding draining charges on the current cpu,
+        * whose stock was already exhausted by our charging attempt. The
+        * check need not be precise, so use raw_smp_processor_id() instead
+        * of get_cpu()/put_cpu().
+        */
+       curcpu = raw_smp_processor_id();
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-               schedule_work_on(cpu, &stock->work);
+               struct mem_cgroup *mem;
+
+               if (cpu == curcpu)
+                       continue;
+
+               mem = stock->cached;
+               if (!mem)
+                       continue;
+               if (mem != root_mem) {
+                       if (!root_mem->use_hierarchy)
+                               continue;
+                       /* check whether "mem" is under tree of "root_mem" */
+                       if (!css_is_ancestor(&mem->css, &root_mem->css))
+                               continue;
+               }
+               if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+                       schedule_work_on(cpu, &stock->work);
        }
        put_online_cpus();
-       atomic_dec(&memcg_drain_count);
+       mutex_unlock(&percpu_charge_mutex);
        /* We don't wait for flush_work */
 }
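
The FLUSHING_CACHED_CHARGE bit set above (and cleared in drain_local_stock())
is a per-cpu "queue at most one drain" latch: because the caller never waits
for the work, the bit keeps repeated calls from piling up identical work
items. A userspace model of the idiom, using C11 atomics in place of the
kernel's bitops (hypothetical demo):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag flushing = ATOMIC_FLAG_INIT;

	static void try_queue_drain(void)
	{
		/* test_and_set_bit(): only the first caller queues */
		if (!atomic_flag_test_and_set(&flushing))
			printf("drain queued\n");
		else
			printf("drain already pending, skipped\n");
	}

	static void drain_finished(void)
	{
		atomic_flag_clear(&flushing);	/* clear_bit() */
	}

	int main(void)
	{
		try_queue_drain();	/* queued */
		try_queue_drain();	/* skipped */
		drain_finished();
		try_queue_drain();	/* queued again */
		return 0;
	}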
 
@@ -1820,9 +2063,9 @@ static void drain_all_stock_async(void)
 static void drain_all_stock_sync(void)
 {
        /* called when force_empty is called */
-       atomic_inc(&memcg_drain_count);
+       mutex_lock(&percpu_charge_mutex);
        schedule_on_each_cpu(drain_local_stock);
-       atomic_dec(&memcg_drain_count);
+       mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
@@ -3348,10 +3591,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                 */
                                next_mz =
                                __mem_cgroup_largest_soft_limit_node(mctz);
-                               if (next_mz == mz) {
+                               if (next_mz == mz)
                                        css_put(&next_mz->mem->css);
-                                       next_mz = NULL;
-                               } else /* next_mz == NULL or other memcg */
+                               else /* next_mz == NULL or other memcg */
                                        break;
                        } while (1);
                }
@@ -3783,6 +4025,8 @@ enum {
        MCS_PGPGIN,
        MCS_PGPGOUT,
        MCS_SWAP,
+       MCS_PGFAULT,
+       MCS_PGMAJFAULT,
        MCS_INACTIVE_ANON,
        MCS_ACTIVE_ANON,
        MCS_INACTIVE_FILE,
@@ -3805,6 +4049,8 @@ struct {
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
        {"swap", "total_swap"},
+       {"pgfault", "total_pgfault"},
+       {"pgmajfault", "total_pgmajfault"},
        {"inactive_anon", "total_inactive_anon"},
        {"active_anon", "total_active_anon"},
        {"inactive_file", "total_inactive_file"},
@@ -3833,6 +4079,10 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
                val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
                s->stat[MCS_SWAP] += val * PAGE_SIZE;
        }
+       val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
+       s->stat[MCS_PGFAULT] += val;
+       val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
+       s->stat[MCS_PGMAJFAULT] += val;
 
        /* per zone stat */
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -3856,6 +4106,51 @@ mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
                mem_cgroup_get_local_stat(iter, s);
 }
 
+#ifdef CONFIG_NUMA
+static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
+{
+       int nid;
+       unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
+       unsigned long node_nr;
+       struct cgroup *cont = m->private;
+       struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+
+       total_nr = mem_cgroup_nr_lru_pages(mem_cont);
+       seq_printf(m, "total=%lu", total_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+
+       file_nr = mem_cgroup_nr_file_lru_pages(mem_cont);
+       seq_printf(m, "file=%lu", file_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+
+       anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont);
+       seq_printf(m, "anon=%lu", anon_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+
+       unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont);
+       seq_printf(m, "unevictable=%lu", unevictable_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont,
+                                                                       nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+       return 0;
+}
+#endif /* CONFIG_NUMA */
+
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
 {
@@ -3866,6 +4161,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_local_stat(mem_cont, &mystat);
 
+
        for (i = 0; i < NR_MCS_STAT; i++) {
                if (i == MCS_SWAP && !do_swap_account)
                        continue;
@@ -4289,6 +4585,22 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
        return 0;
 }
 
+#ifdef CONFIG_NUMA
+static const struct file_operations mem_control_numa_stat_file_operations = {
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
+{
+       struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
+
+       file->f_op = &mem_control_numa_stat_file_operations;
+       return single_open(file, mem_control_numa_stat_show, cont);
+}
+#endif /* CONFIG_NUMA */
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4352,6 +4664,13 @@ static struct cftype mem_cgroup_files[] = {
                .unregister_event = mem_cgroup_oom_unregister_event,
                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
        },
+#ifdef CONFIG_NUMA
+       {
+               .name = "numa_stat",
+               .open = mem_control_numa_stat_open,
+               .mode = S_IRUGO,
+       },
+#endif
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4607,6 +4926,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                res_counter_init(&mem->memsw, NULL);
        }
        mem->last_scanned_child = 0;
+       mem->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&mem->oom_notify);
 
        if (parent)
@@ -5123,18 +5443,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *old_cont,
                                struct task_struct *p)
 {
-       struct mm_struct *mm;
+       struct mm_struct *mm = get_task_mm(p);
 
-       if (!mc.to)
-               /* no need to move charge */
-               return;
-
-       mm = get_task_mm(p);
        if (mm) {
-               mem_cgroup_move_charge(mm);
+               if (mc.to)
+                       mem_cgroup_move_charge(mm);
+               put_swap_token(mm);
                mmput(mm);
        }
-       mem_cgroup_clear_mc();
+       if (mc.to)
+               mem_cgroup_clear_mc();
 }
 #else  /* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,