net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queue's LRU (Least-Recently-Used) list
required taking the hash writer lock. However, the LRU list isn't tied
to the hash at all, so we can use a separate lock for it.

Original-idea-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 6d7b857d54
commit 3ef0eb0db4
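For context, the pattern in this patch is to split one coarse lock in two: the hash table keeps its rwlock, while the LRU list gets a dedicated spinlock, so the per-fragment LRU update on the receive path no longer contends with hash writers. Below is a minimal userspace sketch of that split; pthread primitives stand in for the kernel's rwlock_t/spinlock_t, and the struct and function names (frag_table, frag_lru_move, ...) are illustrative, not kernel API.

/* Minimal userspace sketch of the locking split in this commit.
 * Compile with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void list_init(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_entry(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

struct frag_queue {
	struct list_head lru_list;	/* linkage on the LRU list */
	int id;
};

struct frag_table {
	pthread_rwlock_t hash_lock;	/* still guards the hash buckets */
	pthread_mutex_t lru_lock;	/* new: guards only the LRU list */
	struct list_head lru_list;
};

/* Analogue of inet_frag_lru_move(): runs for every received fragment,
 * and now takes only the cheap LRU lock. */
static void frag_lru_move(struct frag_table *t, struct frag_queue *q)
{
	pthread_mutex_lock(&t->lru_lock);
	list_del_entry(&q->lru_list);
	list_add_tail_entry(&q->lru_list, &t->lru_list);
	pthread_mutex_unlock(&t->lru_lock);
}

int main(void)
{
	struct frag_table t = {
		.hash_lock = PTHREAD_RWLOCK_INITIALIZER,
		.lru_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct frag_queue q = { .id = 1 };

	list_init(&t.lru_list);
	list_init(&q.lru_list);

	/* Hash insertion would take hash_lock... */
	pthread_rwlock_wrlock(&t.hash_lock);
	/* ... hash bucket insert would happen here ... */
	pthread_rwlock_unlock(&t.hash_lock);

	/* ... while LRU insertion takes only lru_lock, mirroring
	 * inet_frag_lru_add(). */
	pthread_mutex_lock(&t.lru_lock);
	list_add_tail_entry(&q.lru_list, &t.lru_list);
	pthread_mutex_unlock(&t.lru_lock);

	frag_lru_move(&t, &q);
	printf("queue %d at LRU tail: %s\n", q.id,
	       t.lru_list.prev == &q.lru_list ? "yes" : "no");
	return 0;
}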
include/net/inet_frag.h
@@ -6,6 +6,7 @@
 struct netns_frags {
 	int			nqueues;
 	struct list_head	lru_list;
+	spinlock_t		lru_lock;
 
 	/* The percpu_counter "mem" need to be cacheline aligned.
 	 *  mem.count must not share cacheline with other writers
@@ -116,4 +117,25 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
 	return percpu_counter_sum_positive(&nf->mem);
 }
 
+static inline void inet_frag_lru_move(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_move_tail(&q->lru_list, &q->net->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_del(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_del(&q->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_add(struct netns_frags *nf,
+				     struct inet_frag_queue *q)
+{
+	spin_lock(&nf->lru_lock);
+	list_add_tail(&q->lru_list, &nf->lru_list);
+	spin_unlock(&nf->lru_lock);
+}
 #endif
net/ipv4/inet_fragment.c
@@ -75,6 +75,7 @@ void inet_frags_init_net(struct netns_frags *nf)
 	nf->nqueues = 0;
 	init_frag_mem_limit(nf);
 	INIT_LIST_HEAD(&nf->lru_list);
+	spin_lock_init(&nf->lru_lock);
 }
 EXPORT_SYMBOL(inet_frags_init_net);
 
@@ -100,9 +101,9 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 {
 	write_lock(&f->lock);
 	hlist_del(&fq->list);
-	list_del(&fq->lru_list);
 	fq->net->nqueues--;
 	write_unlock(&f->lock);
+	inet_frag_lru_del(fq);
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -170,16 +171,17 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
 
 	work = frag_mem_limit(nf) - nf->low_thresh;
 	while (work > 0) {
-		read_lock(&f->lock);
+		spin_lock(&nf->lru_lock);
+
 		if (list_empty(&nf->lru_list)) {
-			read_unlock(&f->lock);
+			spin_unlock(&nf->lru_lock);
 			break;
 		}
 
 		q = list_first_entry(&nf->lru_list,
 				struct inet_frag_queue, lru_list);
 		atomic_inc(&q->refcnt);
-		read_unlock(&f->lock);
+		spin_unlock(&nf->lru_lock);
 
 		spin_lock(&q->lock);
 		if (!(q->last_in & INET_FRAG_COMPLETE))
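A note on the evictor hunk above: the queue is pinned with a reference count before lru_lock is dropped, so a concurrent unlink cannot free it before the evictor takes the heavier per-queue q->lock. Below is a hedged sketch of that pattern; evict_pick and lru_head are illustrative names, and C11 atomics plus a pthread mutex stand in for the kernel's atomic_t and spinlock.

/* Pin-then-unlock pattern from inet_frag_evictor(), as a userspace
 * sketch.  Compile with: cc evict.c -lpthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct queue {
	atomic_int refcnt;
	pthread_mutex_t lock;		/* per-queue lock, like q->lock */
};

static struct queue *lru_head;		/* stand-in for list_first_entry() */
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static struct queue *evict_pick(void)
{
	struct queue *q;

	pthread_mutex_lock(&lru_lock);
	q = lru_head;			/* may be NULL: empty LRU */
	if (q)
		atomic_fetch_add(&q->refcnt, 1);	/* pin before unlock */
	pthread_mutex_unlock(&lru_lock);
	return q;
}

int main(void)
{
	struct queue q = { .refcnt = 1, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct queue *victim;

	lru_head = &q;
	victim = evict_pick();
	if (victim) {
		/* Safe even if another thread unlinked it meanwhile:
		 * our reference keeps the object alive. */
		pthread_mutex_lock(&victim->lock);
		printf("victim refcnt=%d\n", atomic_load(&victim->refcnt));
		pthread_mutex_unlock(&victim->lock);
	}
	return 0;
}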
@@ -233,9 +235,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 
 	atomic_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &f->hash[hash]);
-	list_add_tail(&qp->lru_list, &nf->lru_list);
 	nf->nqueues++;
 	write_unlock(&f->lock);
+	inet_frag_lru_add(nf, qp);
 	return qp;
 }
 
net/ipv4/ip_fragment.c
@@ -529,9 +529,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	    qp->q.meat == qp->q.len)
 		return ip_frag_reasm(qp, prev, dev);
 
-	write_lock(&ip4_frags.lock);
-	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
-	write_unlock(&ip4_frags.lock);
+	inet_frag_lru_move(&qp->q);
 	return -EINPROGRESS;
 
 err:
net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -328,9 +328,8 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->nhoffset = nhoff;
 		fq->q.last_in |= INET_FRAG_FIRST_IN;
 	}
-	write_lock(&nf_frags.lock);
-	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
-	write_unlock(&nf_frags.lock);
+
+	inet_frag_lru_move(&fq->q);
 	return 0;
 
 discard_fq:
net/ipv6/reassembly.c
@@ -341,9 +341,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	    fq->q.meat == fq->q.len)
 		return ip6_frag_reasm(fq, prev, dev);
 
-	write_lock(&ip6_frags.lock);
-	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
-	write_unlock(&ip6_frags.lock);
+	inet_frag_lru_move(&fq->q);
 	return -1;
 
 discard_fq: