2007-05-07 04:49:36 +07:00
|
|
|
#ifndef _LINUX_SLUB_DEF_H
|
|
|
|
#define _LINUX_SLUB_DEF_H
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SLUB : A Slab allocator without object queues.
|
|
|
|
*
|
|
|
|
* (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
|
|
|
|
*/
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/gfp.h>
|
|
|
|
#include <linux/workqueue.h>
|
|
|
|
#include <linux/kobject.h>
|
|
|
|
|
2007-10-16 15:26:05 +07:00
|
|
|
/*
 * Per-cpu slab state; one instance per possible CPU lives in the
 * cpu_slab[] array of struct kmem_cache.  Cacheline-aligned so CPUs
 * do not false-share each other's state.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Head of the per-cpu list of free objects */
	struct page *page;	/* Slab page currently used for allocation */
	int node;		/* NUMA node of the cpu slab page */
	unsigned int offset;	/* Free pointer offset within an object
				 * (presumably in units of void *, mirroring
				 * kmem_cache.offset — confirm in mm/slub.c) */
	/* Lots of wasted space */
} ____cacheline_aligned_in_smp;
|
|
|
|
|
2007-05-07 04:49:36 +07:00
|
|
|
/*
 * Per-NUMA-node bookkeeping for one slab cache.
 */
struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;	/* Number of slabs on the partial list */
	atomic_long_t nr_slabs;	/* Total slabs belonging to this node */
	struct list_head partial;	/* Partially allocated slabs */
#ifdef CONFIG_SLUB_DEBUG
	struct list_head full;	/* Fully allocated slabs (debug tracking only) */
#endif
};
|
|
|
|
|
|
|
|
/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;	/* SLAB_* behavior flags */
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int order;		/* Page allocation order of one slab */

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	int defrag_ratio;	/* NUMA defrag ratio; semantics defined in
				 * mm/slub.c */
	struct kmem_cache_node *node[MAX_NUMNODES];	/* Per-node state */
#endif
	struct kmem_cache_cpu cpu_slab[NR_CPUS];	/* Per-cpu slab state */
};
|
|
|
|
|
|
|
|
/*
 * Kmalloc subsystem.
 *
 * The smallest general cache serves KMALLOC_MIN_SIZE bytes: 8 by
 * default, or ARCH_KMALLOC_MINALIGN when the architecture demands a
 * larger minimum alignment.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

/* Index into kmalloc_caches[] of the smallest general cache */
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
|
2007-05-07 04:49:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We keep the general caches in an array of slab caches that are used for
|
|
|
|
* 2^x bytes of allocations.
|
|
|
|
*/
|
2007-10-16 15:24:38 +07:00
|
|
|
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
|
2007-05-07 04:49:36 +07:00
|
|
|
|
|
|
|
/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 *
 * Map an allocation size to the index of the kmalloc_caches[] cache
 * that services it.  Returns 0 for size 0 (caller maps this to
 * ZERO_SIZE_PTR), the cache index for supported sizes, and -1 when the
 * size exceeds the largest general cache.  Meant to be fully
 * constant-folded when called with a compile-time-constant size.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	/* Caches 1 and 2 hold the off-power-of-two sizes 96 and 192 */
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	/* Power-of-two caches: index i serves sizes up to 2^i */
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the slab cache for a given combination of allocation flags and size.
|
|
|
|
*
|
|
|
|
* This ought to end up with a global pointer to the right cache
|
|
|
|
* in kmalloc_caches.
|
|
|
|
*/
|
2007-08-31 14:48:45 +07:00
|
|
|
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
|
2007-05-07 04:49:36 +07:00
|
|
|
{
|
|
|
|
int index = kmalloc_index(size);
|
|
|
|
|
|
|
|
if (index == 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &kmalloc_caches[index];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * SLUB_DMA is the gfp mask used to request a DMA-capable allocation.
 * Without CONFIG_ZONE_DMA there is no DMA zone and the mask collapses
 * to zero.
 */
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif
|
|
|
|
|
slob: initial NUMA support
This adds preliminary NUMA support to SLOB, primarily aimed at systems with
small nodes (tested all the way down to a 128kB SRAM block), whether
asymmetric or otherwise.
We follow the same conventions as SLAB/SLUB, preferring current node
placement for new pages, or with explicit placement, if a node has been
specified. Presently on UP NUMA this has the side-effect of preferring
node#0 allocations (since numa_node_id() == 0, though this could be
reworked if we could hand off a pfn to determine node placement), so
single-CPU NUMA systems will want to place smaller nodes further out in
terms of node id. Once a page has been bound to a node (via explicit node
id typing), we only do block allocations from partial free pages that have
a matching node id in the page flags.
The current implementation does have some scalability problems, in that all
partial free pages are tracked in the global freelist (with contention due
to the single spinlock). However, these are things that are being reworked
for SMP scalability first, while things like per-node freelists can easily
be built on top of this sort of functionality once it's been added.
More background can be found in:
http://marc.info/?l=linux-mm&m=118117916022379&w=2
http://marc.info/?l=linux-mm&m=118170446306199&w=2
http://marc.info/?l=linux-mm&m=118187859420048&w=2
and subsequent threads.
Acked-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-16 13:38:22 +07:00
|
|
|
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
|
|
|
void *__kmalloc(size_t size, gfp_t flags);
|
|
|
|
|
2007-08-31 14:48:45 +07:00
|
|
|
/*
 * Allocate size bytes with the given gfp flags.
 *
 * For compile-time-constant sizes the cache lookup is resolved at
 * compile time: very large requests (> PAGE_SIZE/2) go straight to the
 * page allocator, zero-size requests yield ZERO_SIZE_PTR, and anything
 * else (barring DMA requests) is served from the matching general
 * cache.  Non-constant sizes and DMA requests fall back to the
 * out-of-line __kmalloc().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (!__builtin_constant_p(size))
		return __kmalloc(size, flags);

	if (size > PAGE_SIZE / 2)
		return (void *)__get_free_pages(flags | __GFP_COMP,
						get_order(size));

	if (flags & SLUB_DMA)
		return __kmalloc(size, flags);

	{
		struct kmem_cache *s = kmalloc_slab(size);

		return s ? kmem_cache_alloc(s, flags) : ZERO_SIZE_PTR;
	}
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
slob: initial NUMA support
This adds preliminary NUMA support to SLOB, primarily aimed at systems with
small nodes (tested all the way down to a 128kB SRAM block), whether
asymmetric or otherwise.
We follow the same conventions as SLAB/SLUB, preferring current node
placement for new pages, or with explicit placement, if a node has been
specified. Presently on UP NUMA this has the side-effect of preferring
node#0 allocations (since numa_node_id() == 0, though this could be
reworked if we could hand off a pfn to determine node placement), so
single-CPU NUMA systems will want to place smaller nodes further out in
terms of node id. Once a page has been bound to a node (via explicit node
id typing), we only do block allocations from partial free pages that have
a matching node id in the page flags.
The current implementation does have some scalability problems, in that all
partial free pages are tracked in the global freelist (with contention due
to the single spinlock). However, these are things that are being reworked
for SMP scalability first, while things like per-node freelists can easily
be built on top of this sort of functionality once it's been added.
More background can be found in:
http://marc.info/?l=linux-mm&m=118117916022379&w=2
http://marc.info/?l=linux-mm&m=118170446306199&w=2
http://marc.info/?l=linux-mm&m=118187859420048&w=2
and subsequent threads.
Acked-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-16 13:38:22 +07:00
|
|
|
void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
|
|
|
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
2007-05-07 04:49:36 +07:00
|
|
|
|
2007-08-31 14:48:45 +07:00
|
|
|
/*
 * Node-aware variant of kmalloc().
 *
 * A compile-time-constant, non-DMA request no larger than PAGE_SIZE/2
 * is served from the matching general cache on the requested node
 * (ZERO_SIZE_PTR for size 0); everything else falls back to the
 * out-of-line __kmalloc_node().
 */
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *s;

	if (!__builtin_constant_p(size) || size > PAGE_SIZE / 2 ||
	    (flags & SLUB_DMA))
		return __kmalloc_node(size, flags, node);

	s = kmalloc_slab(size);
	if (!s)
		return ZERO_SIZE_PTR;

	return kmem_cache_alloc_node(s, flags, node);
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif /* _LINUX_SLUB_DEF_H */
|