netfilter: x_tables: remove XT_TABLE_INFO_SZ and a dereference.
After Florian's patches, there is no need for XT_TABLE_INFO_SZ anymore: only one copy of the table is kept, instead of one copy per cpu.

We can also avoid a dereference if we put the table data right after struct xt_table_info; this reduces register pressure and helps the compiler.

We then attempt a kmalloc() if the total size fits in an order-3 allocation, to reduce TLB pressure, since in many cases rules fit in 32 KB.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 53b8762727
commit 711bdde6a8
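To picture the two changes described in the changelog, here is a minimal userspace sketch (the struct and function names are illustrative stand-ins, not the kernel code, and plain malloc() stands in for the kmalloc()/vmalloc() pair): the rule blob lives directly behind the header in a single allocation, so reaching the entries is a fixed offset from the header rather than a second pointer load.

/* Illustrative stand-ins only; see the diff below for the real kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table_info {
        unsigned int size;
        unsigned char entries[];        /* kernel: unsigned char entries[0] __aligned(8) */
};

static struct table_info *alloc_table_info(unsigned int size)
{
        size_t sz = sizeof(struct table_info) + size;
        struct table_info *info = malloc(sz);   /* kernel: kmalloc(), vmalloc() fallback */

        if (!info)
                return NULL;
        memset(info, 0, sizeof(*info));         /* only the header needs zeroing */
        info->size = size;
        return info;
}

int main(void)
{
        struct table_info *t = alloc_table_info(128);

        if (!t)
                return 1;
        memset(t->entries, 0xab, t->size);      /* rules live inline, no extra dereference */
        printf("header %zu bytes, entries at offset %zu\n",
               sizeof(*t), (size_t)(t->entries - (unsigned char *)t));
        free(t);
        return 0;
}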
@@ -225,12 +225,9 @@ struct xt_table_info {
         unsigned int __percpu *stackptr;
         void ***jumpstack;
 
-        /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
-        void *entries;
+        unsigned char entries[0] __aligned(8);
 };
 
-#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
-                          + nr_cpu_ids * sizeof(char *))
 int xt_register_target(struct xt_target *target);
 void xt_unregister_target(struct xt_target *target);
 int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -256,7 +256,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
         const struct arphdr *arp;
         struct arpt_entry *e, *back;
         const char *indev, *outdev;
-        void *table_base;
+        const void *table_base;
         const struct xt_table_info *private;
         struct xt_action_param acpar;
         unsigned int addend;
@@ -868,7 +868,7 @@ static int compat_table_info(const struct xt_table_info *info,
                              struct xt_table_info *newinfo)
 {
         struct arpt_entry *iter;
-        void *loc_cpu_entry;
+        const void *loc_cpu_entry;
         int ret;
 
         if (!newinfo || !info)
@@ -938,7 +938,7 @@ copy_entries_to_user(unsigned int total_size,
         struct xt_counters *counters;
         const struct xt_table_info *private = table->private;
         int ret = 0;
-        void *loc_cpu_entry;
+        const void *loc_cpu_entry;
 
         counters = alloc_counters(table);
         if (IS_ERR(counters))
@@ -1052,7 +1052,7 @@ static int compat_table_info(const struct xt_table_info *info,
                              struct xt_table_info *newinfo)
 {
         struct ipt_entry *iter;
-        void *loc_cpu_entry;
+        const void *loc_cpu_entry;
         int ret;
 
         if (!newinfo || !info)
@@ -951,7 +951,7 @@ copy_entries_to_user(unsigned int total_size,
         struct xt_counters *counters;
         const struct xt_table_info *private = table->private;
         int ret = 0;
-        void *loc_cpu_entry;
+        const void *loc_cpu_entry;
 
         counters = alloc_counters(table);
         if (IS_ERR(counters))
@@ -1065,7 +1065,7 @@ static int compat_table_info(const struct xt_table_info *info,
                              struct xt_table_info *newinfo)
 {
         struct ip6t_entry *iter;
-        void *loc_cpu_entry;
+        const void *loc_cpu_entry;
         int ret;
 
         if (!newinfo || !info)
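The repeated void * to const void * changes above fall out of the new layout: these functions read the blob through a const struct xt_table_info *private, and with entries now an array member instead of a pointer member, private->entries is itself const-qualified. A small stand-alone illustration (hypothetical struct names, not kernel code):

#include <stdio.h>

struct with_ptr   { void *entries; };            /* old layout: pointer member */
struct with_array { unsigned char entries[8]; }; /* new layout: array member (flexible array in the kernel) */

int main(void)
{
        unsigned char blob[8] = { 0 };
        struct with_ptr   p = { blob };
        struct with_array a = { { 0 } };

        const struct with_ptr   *cp = &p;
        const struct with_array *ca = &a;

        void *ok = cp->entries;       /* fine: only the pointer member is const, not what it points to */
        const void *ro = ca->entries; /* the array member inherits the const qualifier */
        /* void *bad = ca->entries;      would warn: discards 'const' qualifier */

        (void)ok; (void)ro;
        printf("with_ptr is %zu bytes, with_array is %zu bytes\n", sizeof(p), sizeof(a));
        return 0;
}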
@@ -658,29 +658,23 @@ EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
 
 struct xt_table_info *xt_alloc_table_info(unsigned int size)
 {
-        struct xt_table_info *newinfo;
+        struct xt_table_info *info = NULL;
+        size_t sz = sizeof(*info) + size;
 
         /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
         if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
                 return NULL;
 
-        newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
-        if (!newinfo)
-                return NULL;
-
-        newinfo->size = size;
-
-        if (size <= PAGE_SIZE)
-                newinfo->entries = kmalloc(size, GFP_KERNEL);
-        else
-                newinfo->entries = vmalloc(size);
-
-        if (newinfo->entries == NULL) {
-                xt_free_table_info(newinfo);
-                return NULL;
+        if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+                info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+        if (!info) {
+                info = vmalloc(sz);
+                if (!info)
+                        return NULL;
         }
-
-        return newinfo;
+        memset(info, 0, sizeof(*info));
+        info->size = size;
+        return info;
 }
 EXPORT_SYMBOL(xt_alloc_table_info);
 
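For scale: PAGE_ALLOC_COSTLY_ORDER is 3, so with 4 KB pages (an assumption; PAGE_SIZE is architecture dependent) the kmalloc() attempt covers a header plus rule blob of up to 32 KB, matching the changelog's observation that rules usually fit in 32 KB; anything larger, or a failed attempt, falls back to vmalloc(). A throwaway check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096; /* assumed: 4 KB pages */
        unsigned int costly_order = 3;  /* PAGE_ALLOC_COSTLY_ORDER */
        unsigned long threshold = page_size << costly_order;

        printf("kmalloc() attempted for sz <= %lu bytes (%lu KB), vmalloc() otherwise\n",
               threshold, threshold / 1024);
        return 0;
}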
@@ -688,8 +682,6 @@ void xt_free_table_info(struct xt_table_info *info)
 {
         int cpu;
 
-        kvfree(info->entries);
-
         if (info->jumpstack != NULL) {
                 for_each_possible_cpu(cpu)
                         kvfree(info->jumpstack[cpu]);
@@ -698,7 +690,7 @@ void xt_free_table_info(struct xt_table_info *info)
 
         free_percpu(info->stackptr);
 
-        kfree(info);
+        kvfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
 