slub: use raw_cpu_inc for incrementing statistics
diff --git a/mm/slub.c b/mm/slub.c
index 7d81afb..f620bbf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -224,7 +224,11 @@ static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
-       __this_cpu_inc(s->cpu_slab->stat[si]);
+       /*
+        * The rmw is racy on a preemptible kernel but this is acceptable, so
+        * avoid this_cpu_add()'s irq-disable overhead.
+        */
+       raw_cpu_inc(s->cpu_slab->stat[si]);
 #endif
 }
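
For context on the three percpu increment flavors (a sketch based on the generic fallbacks of kernels from this era; the exact helper names vary slightly across versions): this_cpu_inc() must be safe against preemption and interrupts, __this_cpu_inc() merely asserts that preemption is already disabled, and raw_cpu_inc() performs the plain read-modify-write with no checking at all:

	/* Generic fallback for the raw_ ops: a plain, unprotected rmw. */
	#define raw_cpu_generic_to_op(pcp, val, op)			\
	do {								\
		*raw_cpu_ptr(&(pcp)) op val;				\
	} while (0)

	/* Generic fallback for this_cpu_*: the rmw is made irq-safe. */
	#define _this_cpu_generic_to_op(pcp, val, op)			\
	do {								\
		unsigned long flags;					\
		raw_local_irq_save(flags);				\
		*raw_cpu_ptr(&(pcp)) op val;				\
		raw_local_irq_restore(flags);				\
	} while (0)

On a preemptible kernel a task can migrate between the read and the write, so an increment may land on the wrong CPU's counter or be lost outright; for statistics that only feed sysfs, that is a fair trade for skipping the irq save/restore on every fastpath event.
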
 
@@ -3748,7 +3752,11 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
+               int i;
+               struct kmem_cache *c;
+
                s->refcount++;
+
                /*
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
@@ -3756,6 +3764,15 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
                s->object_size = max(s->object_size, (int)size);
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
+               for_each_memcg_cache_index(i) {
+                       c = cache_from_memcg_idx(s, i);
+                       if (!c)
+                               continue;
+                       c->object_size = s->object_size;
+                       c->inuse = max_t(int, c->inuse,
+                                        ALIGN(size, sizeof(void *)));
+               }
+
                if (sysfs_slab_alias(s, name)) {
                        s->refcount--;
                        s = NULL;
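
The loop added above matters because kzalloc()'s zeroing is driven by object_size. A paraphrased fragment of the __GFP_ZERO handling in this era's slab_alloc_node() (not part of the patch):

	if (unlikely(gfpflags & __GFP_ZERO) && object)
		memset(object, 0, s->object_size);

Merging a larger alias grows the root cache's object_size, but before this change a memcg clone kept its stale, smaller value, so kzalloc() through the clone would leave the tail of the object uncleared.
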
@@ -5125,6 +5142,15 @@ static const struct kset_uevent_ops slab_uevent_ops = {
 
 static struct kset *slab_kset;
 
+static inline struct kset *cache_kset(struct kmem_cache *s)
+{
+#ifdef CONFIG_MEMCG_KMEM
+       if (!is_root_cache(s))
+               return s->memcg_params->root_cache->memcg_kset;
+#endif
+       return slab_kset;
+}
+
 #define ID_STR_LENGTH 64
 
 /* Create a unique string id for a slab cache:
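
cache_kset() selects where a cache's kobject lives: root caches stay directly under /sys/kernel/slab, while each memcg clone is filed under its root cache's "cgroup" kset (created in sysfs_slab_add() below). The resulting layout, with hypothetical cache and cgroup names:

	/*
	 * /sys/kernel/slab/dentry                       root cache
	 * /sys/kernel/slab/dentry/cgroup/               its memcg_kset
	 * /sys/kernel/slab/dentry/cgroup/dentry(2:foo)  a memcg clone
	 */
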
@@ -5190,26 +5216,39 @@ static int sysfs_slab_add(struct kmem_cache *s)
                name = create_unique_id(s);
        }
 
-       s->kobj.kset = slab_kset;
+       s->kobj.kset = cache_kset(s);
        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
-       if (err) {
-               kobject_put(&s->kobj);
-               return err;
-       }
+       if (err)
+               goto out_put_kobj;
 
        err = sysfs_create_group(&s->kobj, &slab_attr_group);
-       if (err) {
-               kobject_del(&s->kobj);
-               kobject_put(&s->kobj);
-               return err;
+       if (err)
+               goto out_del_kobj;
+
+#ifdef CONFIG_MEMCG_KMEM
+       if (is_root_cache(s)) {
+               s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
+               if (!s->memcg_kset) {
+                       err = -ENOMEM;
+                       goto out_del_kobj;
+               }
        }
+#endif
+
        kobject_uevent(&s->kobj, KOBJ_ADD);
        if (!unmergeable) {
                /* Setup first alias */
                sysfs_slab_alias(s, s->name);
-               kfree(name);
        }
-       return 0;
+out:
+       if (!unmergeable)
+               kfree(name);
+       return err;
+out_del_kobj:
+       kobject_del(&s->kobj);
+out_put_kobj:
+       kobject_put(&s->kobj);
+       goto out;
 }
 
 static void sysfs_slab_remove(struct kmem_cache *s)
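
The error handling in sysfs_slab_add() is reworked into a shared unwind path: out_del_kobj deliberately falls through into out_put_kobj, and the final goto out routes failures through the same kfree(name) that the success path needs for merged caches. Summarized (a sketch, not part of the patch):

	/*
	 * Failure unwind, in order:
	 *   out_del_kobj: kobject_del(&s->kobj)  - undo kobject_init_and_add()
	 *   out_put_kobj: kobject_put(&s->kobj)  - drop the initial reference
	 *   goto out:     kfree(name) if the cache was mergeable, return err
	 */
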
@@ -5221,6 +5260,9 @@ static void sysfs_slab_remove(struct kmem_cache *s)
                 */
                return;
 
+#ifdef CONFIG_MEMCG_KMEM
+       kset_unregister(s->memcg_kset);
+#endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
        kobject_del(&s->kobj);
        kobject_put(&s->kobj);
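
Note that sysfs_slab_remove() calls kset_unregister() unconditionally. Non-root caches never create a memcg_kset, so this relies on kset_unregister() accepting a NULL kset, a check present in kernels of this era (paraphrased from lib/kobject.c):

	void kset_unregister(struct kset *k)
	{
		if (!k)
			return;
		kobject_put(&k->kobj);
	}
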