[PATCH] slab debug and ARCH_SLAB_MINALIGN don't get along
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e3..ff60a94 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
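
The reap_work hunks above track the 2.6.20 workqueue API change: a callback
now receives the struct work_struct pointer itself rather than a void *
context, and delayed work uses a dedicated struct delayed_work that embeds
a work_struct (hence reap_work->work.func). A minimal sketch of the new
convention, using a hypothetical reap_state payload for illustration (the
real cache_reap() simply ignores its argument):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical per-instance state; the slab code itself keeps none. */
struct reap_state {
	struct delayed_work dwork;
	int node;
};

/* New-style handler: the work_struct comes in; any context is recovered
 * with container_of() on the embedded member instead of a void * arg. */
static void reap_handler(struct work_struct *work)
{
	struct reap_state *rs =
		container_of(work, struct reap_state, dwork.work);

	printk(KERN_INFO "reaping node %d\n", rs->node);
}

static void start_reap(struct reap_state *rs)
{
	INIT_DELAYED_WORK(&rs->dwork, reap_handler);
	schedule_delayed_work(&rs->dwork, HZ);
}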
@@ -2197,18 +2197,17 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
 		ralign = BYTES_PER_WORD;
 
-	/* 2) arch mandated alignment: disables debug if necessary */
+	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
 		ralign = ARCH_SLAB_MINALIGN;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
-	/* 3) caller mandated alignment: disables debug if necessary */
+	/* 3) caller mandated alignment */
 	if (ralign < align) {
 		ralign = align;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
+	/* disable debug if necessary */
+	if (ralign > BYTES_PER_WORD)
+		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
 	 */
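
This restructuring is the heart of the fix: the old code cleared
SLAB_RED_ZONE and SLAB_STORE_USER inside each branch, so the debug flags
were dropped only when that particular branch raised ralign; the patch
hoists the check so it runs once against the final ralign, whichever step
produced it. Red-zoning places a word-sized guard ahead of each object, so
any effective alignment above BYTES_PER_WORD cannot coexist with it; e.g.
with BYTES_PER_WORD == 4 and ARCH_SLAB_MINALIGN == 8, the 4-byte red zone
would break the 8-byte minimum. A standalone model of the consolidated
logic, with illustrative values (not kernel code):

#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD		4	/* 32-bit machine, for illustration */
#define ARCH_SLAB_MINALIGN	8	/* arch minimum above the word size */
#define SLAB_RED_ZONE		0x01UL
#define SLAB_STORE_USER		0x02UL

static unsigned long effective_flags(unsigned long flags, size_t align)
{
	size_t ralign = BYTES_PER_WORD;	/* 1) debug forces word alignment */

	if (ralign < ARCH_SLAB_MINALIGN)	/* 2) arch mandated */
		ralign = ARCH_SLAB_MINALIGN;
	if (ralign < align)			/* 3) caller mandated */
		ralign = align;
	/* disable debug if necessary: one check on the final alignment */
	if (ralign > BYTES_PER_WORD)
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	return flags;
}

int main(void)
{
	/* The arch minimum alone now drops both debug flags: prints 0. */
	printf("%#lx\n", effective_flags(SLAB_RED_ZONE | SLAB_STORE_USER, 0));
	return 0;
}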
@@ -3063,6 +3062,12 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 		cachep->ctor(objp, cachep, ctor_flags);
 	}
+#if ARCH_SLAB_MINALIGN
+	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
+		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+		       objp, ARCH_SLAB_MINALIGN);
+	}
+#endif
 	return objp;
 }
 #else
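
One portability note on the new check: casting objp to u32 is harmless on
32-bit ARM (the target here), but it would truncate pointers on a 64-bit
build. A hypothetical portable variant, masking a pointer-sized integer
instead (not part of the patch):

#include <linux/types.h>
#include <linux/kernel.h>

/* Hypothetical helper: true if objp honors the arch minimum alignment.
 * unsigned long is pointer-sized on all Linux targets, unlike u32. */
static inline bool slab_minalign_ok(const void *objp)
{
#if ARCH_SLAB_MINALIGN
	return ((unsigned long)objp & (ARCH_SLAB_MINALIGN - 1)) == 0;
#else
	return true;
#endif
}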
@@ -3815,7 +3820,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;