2006-12-13 15:34:23 +07:00
|
|
|
#ifndef _LINUX_SLAB_DEF_H
|
|
|
|
#define _LINUX_SLAB_DEF_H
|
|
|
|
|
reciprocal_divide: update/correction of the algorithm
Jakub Zawadzki noticed that some divisions by reciprocal_divide()
were not correct [1][2], which he could also show with BPF code
after divisions are transformed into reciprocal_value() for runtime
invariance which can be passed to reciprocal_divide() later on;
reverse in BPF dump ended up with a different, off-by-one K in
some situations.
This has been fixed by Eric Dumazet in commit aee636c4809fa5
("bpf: do not use reciprocal divide"). This follow-up patch
improves reciprocal_value() and reciprocal_divide() to work in
all cases by using Granlund and Montgomery method, so that also
future use is safe and without any non-obvious side-effects.
Known problems with the old implementation were that division by 1
always returned 0 and some off-by-ones when the dividend and divisor
were very large. This seemed to not be problematic with its
current users, as far as we can tell. Eric Dumazet checked for
the slab usage, we cannot surely say so in the case of flex_array.
Still, in order to fix that, we propose an extension from the
original implementation from commit 6a2d7a955d8d resp. [3][4],
by using the algorithm proposed in "Division by Invariant Integers
Using Multiplication" [5], Torbjörn Granlund and Peter L.
Montgomery, that is, pseudocode for q = n/d where q, n, d is in
u32 universe:
1) Initialization:
int l = ceil(log_2 d)
uword m' = floor((1<<32)*((1<<l)-d)/d)+1
int sh_1 = min(l,1)
int sh_2 = max(l-1,0)
2) For q = n/d, all uword:
uword t = (n*m')>>32
q = (t+((n-t)>>sh_1))>>sh_2
The assembler implementation from Agner Fog [6] also helped a lot
while implementing. We have tested the implementation on x86_64,
ppc64, i686, s390x; on x86_64/haswell we're still half the latency
compared to normal divide.
Joint work with Daniel Borkmann.
[1] http://www.wireshark.org/~darkjames/reciprocal-buggy.c
[2] http://www.wireshark.org/~darkjames/set-and-dump-filter-k-bug.c
[3] https://gmplib.org/~tege/division-paper.pdf
[4] http://homepage.cs.uiowa.edu/~jones/bcd/divide.html
[5] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556
[6] http://www.agner.org/optimize/asmlib.zip
Reported-by: Jakub Zawadzki <darkjames-ws@darkjames.pl>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Austin S Hemmelgarn <ahferroin7@gmail.com>
Cc: linux-kernel@vger.kernel.org
Cc: Jesse Gross <jesse@nicira.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Andy Gospodarek <andy@greyhouse.net>
Cc: Veaceslav Falico <vfalico@redhat.com>
Cc: Jay Vosburgh <fubar@us.ibm.com>
Cc: Jakub Zawadzki <darkjames-ws@darkjames.pl>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-01-22 08:29:41 +07:00
|
|
|
#include <linux/reciprocal_div.h>
|
|
|
|
|
2006-12-13 15:34:23 +07:00
|
|
|
/*
|
|
|
|
* Definitions unique to the original Linux SLAB allocator.
|
2008-05-10 01:32:44 +07:00
|
|
|
*/
|
|
|
|
|
|
|
|
struct kmem_cache {
	/* per-CPU front-end cache (__percpu); hot path for alloc/free */
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;	/* NOTE(review): presumably # of objects transferred per cpu_cache refill/drain — confirm */
	unsigned int limit;		/* NOTE(review): presumably max objects held in a cpu_cache — confirm */
	unsigned int shared;

	unsigned int size;		/* total per-object size (see debug comment below) */
	/* precomputed reciprocal of size for divide-free index math
	 * (see <linux/reciprocal_div.h>) */
	struct reciprocal_value reciprocal_buffer_size;

/* 2) touched by every alloc & free from the backend */
	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	/* cache used to allocate the (off-slab) freelist metadata */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;	/* bytes per slab's freelist */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;		/* cache name, e.g. as shown in /proc — not owned here */
	struct list_head list;		/* linkage on the global list of caches */
	int refcount;
	int object_size;		/* NOTE(review): presumably user-visible object size, excluding debug fields — confirm */
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;		/* allocations satisfied from cpu_cache */
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#ifdef CONFIG_DEBUG_SLAB_LEAK
	/* set once a leak scan has flushed/marked free objects; cleared on free
	 * to invalidate in-progress owner-information collection */
	atomic_t store_user_clean;
#endif

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
#endif
#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	/* pre-shuffled object indices used to randomize freelist order */
	unsigned int *random_seq;
#endif

	/* per-node slab state, indexed by NUMA node id */
	struct kmem_cache_node *node[MAX_NUMNODES];
};
|
|
|
|
|
2016-03-26 04:21:59 +07:00
|
|
|
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
|
2016-07-29 05:49:07 +07:00
|
|
|
void *x)
|
|
|
|
{
|
2016-03-26 04:21:59 +07:00
|
|
|
void *object = x - (x - page->s_mem) % cache->size;
|
|
|
|
void *last_object = page->s_mem + (cache->num - 1) * cache->size;
|
|
|
|
|
|
|
|
if (unlikely(object > last_object))
|
|
|
|
return last_object;
|
|
|
|
else
|
|
|
|
return object;
|
|
|
|
}
|
|
|
|
|
2006-12-13 15:34:23 +07:00
|
|
|
#endif /* _LINUX_SLAB_DEF_H */
|