SLUB: Move page->offset to kmem_cache_cpu->offset
We need the offset from the page struct during slab_alloc() and slab_free(). In
both cases we also reference the cacheline of the kmem_cache_cpu structure. We
can therefore move the offset field into the kmem_cache_cpu structure, freeing
up 16 bits in the page struct.

Moving the offset allows an allocation from slab_alloc() without touching the
page struct in the hot path.

The only thing left in slab_free() that touches the page struct cacheline for
per cpu freeing is the checking of SlabDebug(page). The next patch deals with
that.

Use the available 16 bits to broaden page->inuse. More than 64k objects per
slab become possible and we can get rid of the checks for that limitation.

No need anymore to shrink the order of slabs if we boot with 2M sized slabs
(slub_min_order=9). No need anymore to switch off the offset calculation for
very large slabs, since the field in the kmem_cache_cpu structure is 32 bits
and so the offset field can now handle slab sizes of up to 8GB.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8e65d24c7c
commit b3fba8da65
3 changed files with 13 additions and 45 deletions
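The mechanism behind the patch, for readers new to SLUB: each free object stores a pointer to the next free object inside itself, at a fixed word offset, so walking or updating the freelist means indexing the object as an array of void pointers. Below is a minimal standalone sketch of that linkage; the helper names are hypothetical, not the kernel's code:

	#include <stddef.h>

	/* Pop the head object off a freelist whose next-free link is stored
	 * "offset" words into each object; mirrors c->freelist = object[c->offset]. */
	static void *freelist_pop(void ***freelist, unsigned int offset)
	{
		void **object = *freelist;

		if (!object)
			return NULL;
		*freelist = (void **)object[offset];	/* follow the embedded link */
		return object;
	}

	/* Push an object back onto the freelist;
	 * mirrors object[c->offset] = c->freelist. */
	static void freelist_push(void ***freelist, void *x, unsigned int offset)
	{
		void **object = x;

		object[offset] = *freelist;	/* store the old head inside the object */
		*freelist = object;
	}

Before this patch the word offset lived in page->offset; the diff below moves it into kmem_cache_cpu->offset so the hot paths can follow the link without pulling in the page struct's cacheline.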
include/linux/mm_types.h

@@ -37,10 +37,7 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-		struct {	/* SLUB uses */
-			short unsigned int inuse;
-			short unsigned int offset;
-		};
+		unsigned int inuse;	/* SLUB: Nr of objects */
 	};
 	union {
 	    struct {
include/linux/slub_def.h

@@ -15,6 +15,7 @@ struct kmem_cache_cpu {
 	void **freelist;
 	struct page *page;
 	int node;
+	unsigned int offset;
 	/* Lots of wasted space */
 } ____cacheline_aligned_in_smp;
 
mm/slub.c (52 changed lines)
@@ -200,11 +200,6 @@ static inline void ClearSlabDebug(struct page *page)
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
-/*
- * The page->inuse field is 16 bit thus we have this limitation
- */
-#define MAX_OBJECTS_PER_SLAB 65535
-
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -729,11 +724,6 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 		slab_err(s, page, "Not a valid slab page");
 		return 0;
 	}
-	if (page->offset * sizeof(void *) != s->offset) {
-		slab_err(s, page, "Corrupted offset %lu",
-			(unsigned long)(page->offset * sizeof(void *)));
-		return 0;
-	}
 	if (page->inuse > s->objects) {
 		slab_err(s, page, "inuse %u > max %u",
 			s->name, page->inuse, s->objects);
@@ -872,8 +862,6 @@ bad:
 		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
 		page->freelist = NULL;
-		/* Fix up fields that may be corrupted */
-		page->offset = s->offset / sizeof(void *);
 	}
 	return 0;
 }
@@ -1104,7 +1092,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	n = get_node(s, page_to_nid(page));
 	if (n)
 		atomic_long_inc(&n->nr_slabs);
-	page->offset = s->offset / sizeof(void *);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1398,10 +1385,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[page->offset];
+		c->freelist = c->freelist[c->offset];
 
 		/* And put onto the regular freelist */
-		object[page->offset] = page->freelist;
+		object[c->offset] = page->freelist;
 		page->freelist = object;
 		page->inuse--;
 	}
@@ -1497,7 +1484,7 @@ load_freelist:
 		goto debug;
 
 	object = c->page->freelist;
-	c->freelist = object[c->page->offset];
+	c->freelist = object[c->offset];
 	c->page->inuse = s->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1549,7 +1536,7 @@ debug:
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->page->offset];
+	c->page->freelist = object[c->offset];
 	slab_unlock(c->page);
 	return object;
 }
@@ -1580,7 +1567,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
 	else {
 		object = c->freelist;
-		c->freelist = object[c->page->offset];
+		c->freelist = object[c->offset];
 	}
 	local_irq_restore(flags);
 
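With the hunk above, the slab_alloc() fast path no longer dereferences c->page at all. The following is a hedged reconstruction of what the hot path boils down to after this patch; the struct name and helper are assumptions for illustration, and the slow path, irq handling, and node checks are omitted:

	#include <stddef.h>

	struct page;	/* only referenced through a pointer here */

	struct kmem_cache_cpu_sketch {	/* assumed shape, per the slub_def.h hunk */
		void **freelist;	/* first free object, or NULL */
		struct page *page;	/* slab the freelist belongs to */
		int node;
		unsigned int offset;	/* free-pointer offset, in words */
	};

	static void *fast_alloc(struct kmem_cache_cpu_sketch *c)
	{
		void **object = c->freelist;

		if (!object)
			return NULL;	/* the real code falls back to __slab_alloc() */
		/* Only c's cacheline is read: the link comes via c->offset,
		 * where it previously came via c->page->offset. */
		c->freelist = object[c->offset];
		return object;
	}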
@@ -1613,7 +1600,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr)
+				void *x, void *addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1623,7 +1610,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	if (unlikely(SlabDebug(page)))
 		goto debug;
 checks_ok:
-	prior = object[page->offset] = page->freelist;
+	prior = object[offset] = page->freelist;
 	page->freelist = object;
 	page->inuse--;
 
@@ -1684,10 +1671,10 @@ static void __always_inline slab_free(struct kmem_cache *s,
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
 	if (likely(page == c->page && !SlabDebug(page))) {
-		object[page->offset] = c->freelist;
+		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
-		__slab_free(s, page, x, addr);
+		__slab_free(s, page, x, addr, c->offset);
 
 	local_irq_restore(flags);
 }
@@ -1774,14 +1761,6 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	/*
-	 * If we would create too many object per slab then reduce
-	 * the slab order even if it goes below slub_min_order.
-	 */
-	while (min_order > 0 &&
-		(PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
-		min_order--;
-
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
 			order <= max_order; order++) {
@@ -1796,9 +1775,6 @@ static inline int slab_order(int size, int min_objects,
 		if (rem <= slab_size / fract_leftover)
 			break;
 
-		/* If the next size is too high then exit now */
-		if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
-			break;
 	}
 
 	return order;
@@ -1878,6 +1854,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 {
 	c->page = NULL;
 	c->freelist = NULL;
+	c->offset = s->offset / sizeof(void *);
 	c->node = 0;
 }
 
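init_kmem_cache_cpu() converts the cache's byte offset into a word index once, when the per-cpu structure is set up, so the hot paths never repeat the division. A worked example with assumed numbers:

	/* Hypothetical figures: a 64-bit machine, so sizeof(void *) == 8.
	 * For a cache whose free pointer sits 64 bytes into each object
	 * (s->offset == 64):
	 *
	 *	c->offset = s->offset / sizeof(void *) = 64 / 8 = 8
	 *
	 * so object[c->offset] reads the 9th word of the object, i.e. the
	 * free pointer stored 64 bytes in. */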
@@ -2110,14 +2087,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->objects = (PAGE_SIZE << s->order) / size;
 
-	/*
-	 * Verify that the number of objects is within permitted limits.
-	 * The page->inuse field is only 16 bit wide! So we cannot have
-	 * more than 64k objects per slab.
-	 */
-	if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
-		return 0;
-	return 1;
+	return !!s->objects;
 
 }