slab/slub: struct memcg_params

For the kmem slab controller, we need to record some extra information in
the kmem_cache structure.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Suleiman Souhlal <suleiman@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Glauber Costa, 2012-12-18 14:22:27 -08:00, committed by Linus Torvalds
parent d5bdae7d59
commit ba6c496ed8
4 changed files with 43 additions and 0 deletions


@@ -176,6 +176,30 @@ void kmem_cache_free(struct kmem_cache *, void *);
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
* This is the main placeholder for memcg-related information in kmem caches.
* struct kmem_cache will hold a pointer to it, so the memory cost while
* disabled is 1 pointer. The runtime cost while enabled is higher than it
* would be if this were bundled into kmem_cache: we'll need an
* extra pointer chase. But the trade-off clearly lies in favor of not
* penalizing non-users.
*
* Both the root cache and the child caches will have it. For the root cache,
* this will hold a dynamically allocated array large enough to hold
* information about the currently limited memcgs in the system.
*
* Child caches will hold extra metadata needed for their operation. Fields are:
*
* @memcg: pointer to the memcg this cache belongs to
*/
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct mem_cgroup *memcg;
	};
};
/*
 * Common kmalloc functions provided by all allocators
 */
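
For illustration only (not part of this commit): a minimal sketch of how a root cache's memcg_params, together with its trailing memcg_caches[] array, might be sized and allocated. The function name memcg_alloc_cache_params_sketch and the num_memcgs parameter are assumptions made for this example.

/*
 * Hypothetical sketch, not from this commit: allocate memcg_params for a
 * root cache, leaving room for one child-cache pointer per memcg.
 * num_memcgs is an assumed parameter.
 */
static int memcg_alloc_cache_params_sketch(struct kmem_cache *s, int num_memcgs)
{
	size_t size = sizeof(struct memcg_cache_params) +
		      num_memcgs * sizeof(struct kmem_cache *);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	s->memcg_params->is_root_cache = true;
	return 0;
}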


@@ -81,6 +81,9 @@ struct kmem_cache {
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif
/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*


@@ -101,6 +101,9 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif
#ifdef CONFIG_NUMA
	/*
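
With the same one-pointer hook added to both SLAB's and SLUB's struct kmem_cache, callers that know which flavor of cache they hold can pick the right member of the union. A pair of hypothetical accessors (the names and the idx parameter are assumptions, not part of this commit):

/*
 * Hypothetical accessors, not from this commit.
 */
static inline struct kmem_cache *root_to_memcg_cache(struct kmem_cache *root, int idx)
{
	/* Root cache only: idx indexes the per-memcg child-cache array. */
	return root->memcg_params->memcg_caches[idx];
}

static inline struct mem_cgroup *cache_to_memcg(struct kmem_cache *child)
{
	/* Child cache only: the union holds the owning memcg. */
	return child->memcg_params->memcg;
}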


@@ -100,4 +100,17 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}
#endif
#endif
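
A hypothetical caller (not part of this commit, assumes CONFIG_MEMCG_KMEM=y) might use is_root_cache() to decide whether a cache is charged to a memcg at all; memcg_from_cache_sketch is an assumed name:

/*
 * Hypothetical usage, not from this commit: resolve the memcg owning a
 * cache; root caches are not accounted to any memcg.
 */
static inline struct mem_cgroup *memcg_from_cache_sketch(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return NULL;			/* root cache: unaccounted */
	return s->memcg_params->memcg;		/* child cache: owning memcg */
}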