mm/sl[aou]b: Use "kmem_cache" name for slab cache with kmem_cache struct

Make all allocators use the "kmem_cache" slabname for the "kmem_cache"
structure.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 945cf2b619
commit 9b030cb865
5 changed files with 52 additions and 37 deletions
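
The pattern the patch converges on is the same in every allocator: a statically allocated boot cache, now uniformly named "kmem_cache", with the shared kmem_cache pointer declared in mm/slab.h pointed at it early in kmem_cache_init(). The following is a minimal standalone C sketch of that shape, using simplified stand-in types and field names rather than the kernel's real struct layout:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct kmem_cache. */
struct kmem_cache {
        const char *name;
        size_t size;
};

/* Shared declaration: in the kernel this is
 * "extern struct kmem_cache *kmem_cache;" in mm/slab.h,
 * defined once in mm/slab_common.c. */
struct kmem_cache *kmem_cache;

/* Each allocator keeps its own statically allocated boot cache,
 * but all of them now use the common "kmem_cache" name. */
static struct kmem_cache kmem_cache_boot = {
        .name = "kmem_cache",
        .size = sizeof(struct kmem_cache),
};

void kmem_cache_init(void)
{
        /* Point the shared pointer at the boot cache before any other
         * cache descriptors are allocated from it. */
        kmem_cache = &kmem_cache_boot;
}

int main(void)
{
        kmem_cache_init();
        printf("boot cache: %s, object size %zu\n",
               kmem_cache->name, kmem_cache->size);
        return 0;
}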
mm/slab.c | 72
@@ -578,9 +578,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-        .nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+        .nodelists = kmem_cache_nodelists,
         .batchcount = 1,
         .limit = BOOT_CPUCACHE_ENTRIES,
         .shared = 1,
@@ -1594,15 +1594,17 @@ void __init kmem_cache_init(void)
         int order;
         int node;
 
+        kmem_cache = &kmem_cache_boot;
+
         if (num_possible_nodes() == 1)
                 use_alien_caches = 0;
 
         for (i = 0; i < NUM_INIT_LISTS; i++) {
                 kmem_list3_init(&initkmem_list3[i]);
                 if (i < MAX_NUMNODES)
-                        cache_cache.nodelists[i] = NULL;
+                        kmem_cache->nodelists[i] = NULL;
         }
-        set_up_list3s(&cache_cache, CACHE_CACHE);
+        set_up_list3s(kmem_cache, CACHE_CACHE);
 
         /*
          * Fragmentation resistance on low memory - only use bigger
@@ -1614,9 +1616,9 @@ void __init kmem_cache_init(void)
 
         /* Bootstrap is tricky, because several objects are allocated
          * from caches that do not exist yet:
-         * 1) initialize the cache_cache cache: it contains the struct
-         *    kmem_cache structures of all caches, except cache_cache itself:
-         *    cache_cache is statically allocated.
+         * 1) initialize the kmem_cache cache: it contains the struct
+         *    kmem_cache structures of all caches, except kmem_cache itself:
+         *    kmem_cache is statically allocated.
          *    Initially an __init data area is used for the head array and the
          *    kmem_list3 structures, it's replaced with a kmalloc allocated
          *    array at the end of the bootstrap.
@@ -1625,43 +1627,43 @@ void __init kmem_cache_init(void)
          *    An __init data area is used for the head array.
          * 3) Create the remaining kmalloc caches, with minimally sized
          *    head arrays.
-         * 4) Replace the __init data head arrays for cache_cache and the first
+         * 4) Replace the __init data head arrays for kmem_cache and the first
          *    kmalloc cache with kmalloc allocated arrays.
-         * 5) Replace the __init data for kmem_list3 for cache_cache and
+         * 5) Replace the __init data for kmem_list3 for kmem_cache and
          *    the other cache's with kmalloc allocated memory.
          * 6) Resize the head arrays of the kmalloc caches to their final sizes.
          */
 
         node = numa_mem_id();
 
-        /* 1) create the cache_cache */
+        /* 1) create the kmem_cache */
         INIT_LIST_HEAD(&slab_caches);
-        list_add(&cache_cache.list, &slab_caches);
-        cache_cache.colour_off = cache_line_size();
-        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-        cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+        list_add(&kmem_cache->list, &slab_caches);
+        kmem_cache->colour_off = cache_line_size();
+        kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+        kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
         /*
          * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
          */
-        cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+        kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
                                   nr_node_ids * sizeof(struct kmem_list3 *);
-        cache_cache.object_size = cache_cache.size;
-        cache_cache.size = ALIGN(cache_cache.size,
+        kmem_cache->object_size = kmem_cache->size;
+        kmem_cache->size = ALIGN(kmem_cache->object_size,
                                         cache_line_size());
-        cache_cache.reciprocal_buffer_size =
-                reciprocal_value(cache_cache.size);
+        kmem_cache->reciprocal_buffer_size =
+                reciprocal_value(kmem_cache->size);
 
         for (order = 0; order < MAX_ORDER; order++) {
-                cache_estimate(order, cache_cache.size,
-                        cache_line_size(), 0, &left_over, &cache_cache.num);
-                if (cache_cache.num)
+                cache_estimate(order, kmem_cache->size,
+                        cache_line_size(), 0, &left_over, &kmem_cache->num);
+                if (kmem_cache->num)
                         break;
         }
-        BUG_ON(!cache_cache.num);
-        cache_cache.gfporder = order;
-        cache_cache.colour = left_over / cache_cache.colour_off;
-        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+        BUG_ON(!kmem_cache->num);
+        kmem_cache->gfporder = order;
+        kmem_cache->colour = left_over / kmem_cache->colour_off;
+        kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
                                       sizeof(struct slab), cache_line_size());
 
         /* 2+3) create the kmalloc caches */
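
The size computation in the hunk above derives the boot cache's object size from the variable-length tail of struct kmem_cache: everything up to the nr_cpu_ids-th per-CPU array slot, plus one node-list pointer per node, then rounded up to a cache line. Below is a minimal userspace sketch of the same arithmetic with a simplified stand-in struct and made-up counts (4 CPUs, 2 nodes, 64-byte lines), not the kernel's real layout:

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct list3;                   /* per-node lists, opaque here */
struct array_cache;             /* per-CPU head array, opaque here */

/* Simplified stand-in for struct kmem_cache: fixed fields followed by
 * a per-CPU pointer array whose used length depends on the CPU count. */
struct kmem_cache_stub {
        unsigned int limit;
        unsigned int batchcount;
        struct list3 **nodelists;
        struct array_cache *array[1];   /* really nr_cpu_ids entries */
};

int main(void)
{
        size_t nr_cpu_ids = 4, nr_node_ids = 2, cache_line = 64;

        /* Bytes actually used: header, nr_cpu_ids array slots, and one
         * list pointer per node.  The kernel spells the first two terms
         * as offsetof(struct kmem_cache, array[nr_cpu_ids]). */
        size_t size = offsetof(struct kmem_cache_stub, array)
                        + nr_cpu_ids * sizeof(struct array_cache *)
                        + nr_node_ids * sizeof(struct list3 *);

        printf("object_size = %zu, size = %zu\n",
               size, ALIGN(size, cache_line));
        return 0;
}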
@@ -1728,15 +1730,15 @@ void __init kmem_cache_init(void)
 
         ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-        BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-        memcpy(ptr, cpu_cache_get(&cache_cache),
+        BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+        memcpy(ptr, cpu_cache_get(kmem_cache),
                sizeof(struct arraycache_init));
         /*
          * Do not assume that spinlocks can be initialized via memcpy:
          */
         spin_lock_init(&ptr->lock);
 
-        cache_cache.array[smp_processor_id()] = ptr;
+        kmem_cache->array[smp_processor_id()] = ptr;
 
         ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
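
The hunk above is step 4 of the bootstrap comment: once kmalloc() works, the boot cache's __init head array is copied into a kmalloc'd replacement, and the embedded lock is re-initialized rather than trusted to survive the memcpy(). A rough userspace analogue of that promotion step, with a pthread mutex standing in for the array_cache spinlock and hypothetical names rather than kernel code:

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

/* Simplified stand-in for struct arraycache_init. */
struct head_array {
        pthread_mutex_t lock;
        unsigned int avail;
        void *entries[16];
};

/* Boot-time storage, analogous to the __init data used early on. */
static struct head_array boot_head = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Once a real allocator is available, move the data to heap memory.
 * As the diff's comment warns, do not assume the lock can be copied
 * with memcpy(): re-initialize it in the new location. */
static struct head_array *promote_head_array(void)
{
        struct head_array *ptr = malloc(sizeof(*ptr));

        if (!ptr)
                return NULL;
        memcpy(ptr, &boot_head, sizeof(*ptr));
        pthread_mutex_init(&ptr->lock, NULL);   /* like spin_lock_init() */
        return ptr;
}

int main(void)
{
        struct head_array *ha = promote_head_array();

        free(ha);
        return 0;
}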
@@ -1757,7 +1759,7 @@ void __init kmem_cache_init(void)
                 int nid;
 
                 for_each_online_node(nid) {
-                        init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+                        init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
                         init_list(malloc_sizes[INDEX_AC].cs_cachep,
                                   &initkmem_list3[SIZE_AC + nid], nid);
@@ -2223,7 +2225,7 @@ void __kmem_cache_destroy(struct kmem_cache *cachep)
                         kfree(l3);
                 }
         }
-        kmem_cache_free(&cache_cache, cachep);
+        kmem_cache_free(kmem_cache, cachep);
 }
 
 
@@ -2473,7 +2475,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
                 gfp = GFP_NOWAIT;
 
         /* Get cache's description obj. */
-        cachep = kmem_cache_zalloc(&cache_cache, gfp);
+        cachep = kmem_cache_zalloc(kmem_cache, gfp);
         if (!cachep)
                 return NULL;
 
@@ -2531,7 +2533,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         if (!cachep->num) {
                 printk(KERN_ERR
                        "kmem_cache_create: couldn't create cache %s.\n", name);
-                kmem_cache_free(&cache_cache, cachep);
+                kmem_cache_free(kmem_cache, cachep);
                 return NULL;
         }
         slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
@@ -3299,7 +3301,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-        if (cachep == &cache_cache)
+        if (cachep == kmem_cache)
                 return false;
 
         return should_failslab(cachep->object_size, flags, cachep->flags);
mm/slab.h

@@ -25,8 +25,14 @@ extern enum slab_state slab_state;
 
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
 
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
         size_t align, unsigned long flags, void (*ctor)(void *));
 
mm/slab_common.c

@@ -22,6 +22,7 @@
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, size_t size)
mm/slob.c

@@ -622,8 +622,16 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+struct kmem_cache kmem_cache_boot = {
+        .name = "kmem_cache",
+        .size = sizeof(struct kmem_cache),
+        .flags = SLAB_PANIC,
+        .align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+        kmem_cache = &kmem_cache_boot;
         slab_state = UP;
 }
 
mm/slub.c

@@ -3221,8 +3221,6 @@ void __kmem_cache_destroy(struct kmem_cache *s)
 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 EXPORT_SYMBOL(kmalloc_caches);
 
-static struct kmem_cache *kmem_cache;
-
 #ifdef CONFIG_ZONE_DMA
 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
 #endif