ipv6/addrconf: speedup /proc/net/if_inet6 filling

This ensures linear behaviour when filling /proc/net/if_inet6, making
ifconfig run fast on IPv6-only addresses: the iterator now remembers the hash
bucket and the offset within it, so each seq_file restart resumes where the
previous read stopped instead of re-walking the address list from the
beginning. Combined with the IPv4 patch sent a while ago, ifconfig runs in
linear time regardless of address type.

IPv4 related patch: f04565ddf5
	 dev: use name hash for dev_seq_ops
	 ...

Some statistics (running ifconfig > /dev/null on a different setup):

 iface count | IPv6 no-patch time | IPv6 patched time | IPv4 time
-------------+--------------------+-------------------+----------
      6250  |       0.23 s       |      0.13 s       |  0.11 s
     12500  |       0.62 s       |      0.28 s       |  0.22 s
     25000  |       2.91 s       |      0.57 s       |  0.46 s
     50000  |      11.37 s       |      1.21 s       |  0.94 s
    128000  |      86.78 s       |      3.05 s       |  2.54 s

Signed-off-by: Mihai Maruseac <mmaruseac@ixiacom.com>
Cc: Daniel Baluta <dbaluta@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Mihai Maruseac, 2012-01-03 23:31:35 +00:00 (committed by David S. Miller)
parent 48529680dc
commit 1d5783030a

@@ -3068,20 +3068,39 @@ static void addrconf_dad_run(struct inet6_dev *idev)
 struct if6_iter_state {
         struct seq_net_private p;
         int bucket;
+        int offset;
 };
 
-static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
+static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 {
         struct inet6_ifaddr *ifa = NULL;
         struct if6_iter_state *state = seq->private;
         struct net *net = seq_file_net(seq);
+        int p = 0;
 
-        for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
+        /* initial bucket if pos is 0 */
+        if (pos == 0) {
+                state->bucket = 0;
+                state->offset = 0;
+        }
+
+        for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
                 struct hlist_node *n;
                 hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
-                                            addr_lst)
+                                            addr_lst) {
+                        /* sync with offset */
+                        if (p < state->offset) {
+                                p++;
+                                continue;
+                        }
+                        state->offset++;
                         if (net_eq(dev_net(ifa->idev->dev), net))
                                 return ifa;
+                }
+
+                /* prepare for next bucket */
+                state->offset = 0;
+                p = 0;
         }
         return NULL;
 }
@@ -3093,13 +3112,17 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
         struct net *net = seq_file_net(seq);
         struct hlist_node *n = &ifa->addr_lst;
 
-        hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
+        hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+                state->offset++;
                 if (net_eq(dev_net(ifa->idev->dev), net))
                         return ifa;
+        }
 
         while (++state->bucket < IN6_ADDR_HSIZE) {
+                state->offset = 0;
                 hlist_for_each_entry_rcu_bh(ifa, n,
                                 &inet6_addr_lst[state->bucket], addr_lst) {
+                        state->offset++;
                         if (net_eq(dev_net(ifa->idev->dev), net))
                                 return ifa;
                 }
@@ -3108,21 +3131,11 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
         return NULL;
 }
 
-static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
-{
-        struct inet6_ifaddr *ifa = if6_get_first(seq);
-
-        if (ifa)
-                while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
-                        --pos;
-        return pos ? NULL : ifa;
-}
-
 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
         __acquires(rcu_bh)
 {
         rcu_read_lock_bh();
-        return if6_get_idx(seq, *pos);
+        return if6_get_first(seq, *pos);
 }
 
 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
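
The gain comes from how seq_file drives the iterator: every read() of
/proc/net/if_inet6 restarts at the current position, and the removed
if6_get_idx() reached that position by walking forward from the very first
address each time (quadratic overall), whereas the new if6_get_first(seq, pos)
resumes from the bucket and offset saved in if6_iter_state. Below is a minimal
user-space sketch of that bucket/offset resume pattern; the names (iter_first,
iter_next, struct iter_state, struct entry, HSIZE) are illustrative stand-ins
for their kernel counterparts, not the kernel code, and the sketch drops the
RCU locking and per-netns filtering the real iterator performs.

/*
 * Minimal user-space sketch of the resumable bucket/offset iteration;
 * names are illustrative, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

#define HSIZE 16                        /* stand-in for IN6_ADDR_HSIZE */

struct entry {
        int id;
        struct entry *next;
};

static struct entry *table[HSIZE];      /* bucketed hash list */

struct iter_state {                     /* mirrors if6_iter_state */
        int bucket;
        int offset;                     /* entries already emitted in bucket */
};

/* Resume at the saved (bucket, offset); only pos == 0 resets the state. */
static struct entry *iter_first(struct iter_state *st, long pos)
{
        if (pos == 0) {
                st->bucket = 0;
                st->offset = 0;
        }
        for (; st->bucket < HSIZE; ++st->bucket) {
                struct entry *e;
                int p = 0;

                for (e = table[st->bucket]; e; e = e->next) {
                        if (p < st->offset) {   /* skip already-emitted entries */
                                p++;
                                continue;
                        }
                        st->offset++;
                        return e;
                }
                st->offset = 0;                 /* next bucket starts clean */
        }
        return NULL;
}

/* Advance in the current bucket, then fall through to later buckets. */
static struct entry *iter_next(struct iter_state *st, struct entry *e)
{
        if (e->next) {
                st->offset++;
                return e->next;
        }
        while (++st->bucket < HSIZE) {
                st->offset = 0;
                if (table[st->bucket]) {
                        st->offset++;
                        return table[st->bucket];
                }
        }
        return NULL;
}

int main(void)
{
        struct iter_state st = { 0, 0 };
        struct entry *e;
        long pos = 0;
        int i;

        for (i = 0; i < 40; i++) {              /* populate a few buckets */
                struct entry *n = malloc(sizeof(*n));
                n->id = i;
                n->next = table[i % HSIZE];
                table[i % HSIZE] = n;
        }

        /* Emulates seq_file: one start, then next until the end. */
        for (e = iter_first(&st, pos); e; e = iter_next(&st, e), pos++)
                printf("entry %d (bucket %d)\n", e->id, st.bucket);

        /* A restart at the current position resumes from the saved state
         * instead of re-walking every entry emitted so far. */
        e = iter_first(&st, pos);
        printf("restart at pos=%ld -> %s\n", pos, e ? "more entries" : "end");
        return 0;
}

With this pattern a restart at a non-zero position costs at most one walk over
the current bucket rather than a walk over everything emitted so far, which is
what flattens the measured times in the table above.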