git.openpandora.org / pandora-kernel.git / blobdiff
ppp: Don't stop and restart queue on every TX packet
[pandora-kernel.git] / mm / slub.c
diff --git a/mm/slub.c b/mm/slub.c
index f4a6229..ffe13fd 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -29,6 +29,7 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
 
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+	prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
 	void *p;
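The hunk above adds prefetch_freepointer(), which issues a software prefetch for the cache line holding the next free object's link (each free object stores that link at offset s->offset inside itself). A minimal userspace sketch of the same idea using GCC's __builtin_prefetch; the toy struct cache and its layout are illustrative stand-ins, not SLUB's actual types:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy freelist: each free object stores a pointer to the next one
 * at a fixed offset, like SLUB's s->offset. */
struct cache {
	size_t offset;	/* where the freelist link lives inside an object */
};

static void *get_freepointer(struct cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

static void prefetch_freepointer(struct cache *s, void *object)
{
	/* Hint the CPU to pull in the line holding the next link early;
	 * a prefetch is only a hint, so a stale address is harmless. */
	__builtin_prefetch((char *)object + s->offset);
}

int main(void)
{
	struct cache s = { .offset = 0 };
	void *a = malloc(64), *b = malloc(64);

	*(void **)a = b;	/* a's freelist link points at b */
	*(void **)b = NULL;

	void *next = get_freepointer(&s, a);
	prefetch_freepointer(&s, next);	/* warm b's link before we pop it */
	printf("next object: %p\n", next);
	free(a);
	free(b);
	return 0;
}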
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
 		} else {
 			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
+			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
 			break;
@@ -1983,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 				local_irq_restore(flags);
 				pobjects = 0;
 				pages = 0;
+				stat(s, CPU_PARTIAL_DRAIN);
 			}
 		}
 
@@ -1994,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
 
@@ -2028,9 +2035,17 @@ static void flush_cpu_slab(void *d)
 	__flush_cpu_slab(s, smp_processor_id());
 }
 
+static bool has_cpu_slab(int cpu, void *info)
+{
+	struct kmem_cache *s = info;
+	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+
+	return !!(c->page);
+}
+
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu(flush_cpu_slab, s, 1);
+	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
 }
 
 /*
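flush_all() previously IPI'd every online CPU via on_each_cpu(); with on_each_cpu_cond() the has_cpu_slab() predicate is checked first and only CPUs that actually hold a per-CPU slab get interrupted. A loop-based sketch of that conditional-call pattern; the callback signatures are simplified (the kernel's call function takes only the info pointer, and the GFP_ATOMIC argument is for an internal cpumask allocation):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Toy per-CPU state standing in for struct kmem_cache_cpu. */
static struct { void *page; } cpu_slab[NR_CPUS];

/* Model of on_each_cpu_cond(): poll the predicate cheaply and only
 * "IPI" (here: call) the CPUs that report work. */
static void on_each_cpu_cond(bool (*cond)(int cpu, void *info),
			     void (*func)(int cpu, void *info),
			     void *info)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cond(cpu, info))
			func(cpu, info);	/* the kernel sends an IPI here */
}

static bool has_cpu_slab(int cpu, void *info)
{
	(void)info;
	return cpu_slab[cpu].page != NULL;
}

static void flush_cpu_slab(int cpu, void *info)
{
	(void)info;
	printf("flushing cpu %d\n", cpu);
	cpu_slab[cpu].page = NULL;
}

int main(void)
{
	static int dummy;

	cpu_slab[1].page = &dummy;	/* only CPU 1 holds a cached slab */
	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, NULL);
	return 0;
}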
@@ -2319,6 +2334,8 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
+		void *next_object = get_freepointer_safe(s, object);
+
 		/*
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
@@ -2334,11 +2351,12 @@ redo:
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer_safe(s, object), next_tid(tid)))) {
+				next_object, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
+		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
 
@@ -2475,9 +2493,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * If we just froze the page then put it onto the
 		 * per cpu partial list.
 		 */
-		if (new.frozen && !was_frozen)
+		if (new.frozen && !was_frozen) {
 			put_cpu_partial(s, page, 1);
-
+			stat(s, CPU_PARTIAL_FREE);
+		}
 		/*
 		 * The list lock was not taken therefore no list
 		 * activity can be necessary.
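Taken together with the two earlier stat hunks, the accounting now records *how* a page reached or left the per-CPU partial list instead of funnelling everything through put_cpu_partial(): CPU_PARTIAL_FREE counts only pages frozen on the free path. A summary excerpt (the enum name here is made up for illustration; the item names are the real SLUB stat items, and the comments reflect the increment sites visible in this diff):

/* How the per-CPU partial counters break down after this series. */
enum stat_item_excerpt {
	CPU_PARTIAL_ALLOC,	/* pre-existing: alloc path took a page from the per-CPU list */
	CPU_PARTIAL_FREE,	/* __slab_free() froze a page and parked it per-CPU */
	CPU_PARTIAL_NODE,	/* get_partial_node() pulled a page off a node partial list */
	CPU_PARTIAL_DRAIN,	/* put_cpu_partial() overflowed and drained back to the node */
};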
@@ -3939,13 +3958,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
+			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
+				down_write(&slub_lock);
 				list_del(&s->list);
 				kfree(n);
 				kfree(s);
 				goto err;
 			}
-			up_write(&slub_lock);
 			return s;
 		}
 		kfree(n);
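kmem_cache_create() now releases slub_lock before calling sysfs_slab_add() and retakes it only to unwind on failure: sysfs registration can sleep and can reach out to userspace, so a global mutex must not be held across it; publishing the cache on the list first keeps the registration itself exclusive. A runnable pthread sketch of the publish / drop-lock / roll-back pattern; every name below is an illustrative stand-in, not kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered;

static void list_add(void) { registered = 1; }
static void list_del(void) { registered = 0; }

/* Stand-in for sysfs_slab_add(): may block or call out of the
 * subsystem, so it must not run under registry_lock. */
static int sysfs_add(void) { return -1; /* simulate failure */ }

static int cache_register(void)
{
	pthread_mutex_lock(&registry_lock);
	list_add();			/* publish while exclusive */
	pthread_mutex_unlock(&registry_lock);

	if (sysfs_add()) {		/* no global lock held here */
		pthread_mutex_lock(&registry_lock);
		list_del();		/* roll back the publication */
		pthread_mutex_unlock(&registry_lock);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("register: %d, registered: %d\n", cache_register(), registered);
	return 0;
}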
@@ -5069,6 +5089,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -5134,6 +5156,8 @@ static struct attribute *slab_attrs[] = {
 	&cmpxchg_double_cpu_fail_attr.attr,
 	&cpu_partial_alloc_attr.attr,
 	&cpu_partial_free_attr.attr,
+	&cpu_partial_node_attr.attr,
+	&cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,
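With CONFIG_SLUB_STATS=y these STAT_ATTR entries surface the two new counters as read-only files under /sys/kernel/slab/<cache>/, next to cpu_partial_alloc and cpu_partial_free. A small reader, assuming a kmalloc-64 cache exists on the running system (any cache name will do):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/cpu_partial_node";
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* likely no SLUB stats in this kernel */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* total, then per-CPU pairs */
	fclose(f);
	return 0;
}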