[PATCH] list: use list_replace_init() instead of list_splice_init()
list_splice_init(list, head) does unneeded work if it is known that list_empty(head) == 1. We can use list_replace_init() instead.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 54e7377035
commit 626ab0e69d
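For readers following the change outside the tree: the sketch below re-creates just enough of the kernel's list API in plain userspace C to contrast the two helpers. It is a simplified, illustrative rendering of include/linux/list.h, not the kernel's verbatim code, and the node names (src, dst, a, b) are invented for the demo. list_splice_init() must test for the empty case and stitch four pointers into the destination's existing chain; list_replace_init() simply lets the new head take over the old one's links, which is also why the hunks below can drop the LIST_HEAD()/LIST_HEAD_INIT() initialization of the destination.

#include <stdio.h>

/* Minimal userspace stand-in for the kernel's doubly linked list. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list->prev = list;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* General-purpose: joins @list onto @head, correct even when @head
 * already has entries, at the cost of an emptiness check and four
 * pointer stitches into @head's chain. */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list)) {
		struct list_head *first = list->next;
		struct list_head *last = list->prev;
		struct list_head *at = head->next;

		first->prev = head;
		head->next = first;
		last->next = at;
		at->prev = last;
		INIT_LIST_HEAD(list);
	}
}

/* Cheaper: @new takes over @old's links wholesale.  Equivalent to the
 * above only when @new is known to be empty -- the situation at every
 * call site this patch converts. */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

int main(void)
{
	struct list_head src, a, b;
	struct list_head dst;	/* deliberately never initialized */
	struct list_head *p;
	int n = 0;

	INIT_LIST_HEAD(&src);
	src.next = &a; a.prev = &src;	/* link two nodes by hand; the    */
	a.next = &b;   b.prev = &a;	/* kernel would use list_add_tail */
	b.next = &src; src.prev = &b;

	list_replace_init(&src, &dst);	/* dst takes over src's chain */

	for (p = dst.next; p != &dst; p = p->next)
		n++;
	printf("drained %d nodes, source empty: %d\n", n, list_empty(&src));

	list_splice_init(&dst, &src);	/* the dearer variant, for contrast */
	printf("spliced back, dst empty: %d\n", list_empty(&dst));
	return 0;
}

Every hunk below follows the same drain idiom: take the lock, move the whole pending list onto a freshly declared private head, drop the lock, then walk the private copy. Because that private head is always empty by construction, the cheaper takeover is safe and its destination needs no prior initialization.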
arch/i386/mm/pageattr.c
@@ -209,19 +209,19 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 }
 
 void global_flush_tlb(void)
 {
-	LIST_HEAD(l);
+	struct list_head l;
 	struct page *pg, *next;
 
 	BUG_ON(irqs_disabled());
 
 	spin_lock_irq(&cpa_lock);
-	list_splice_init(&df_list, &l);
+	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
 	flush_map();
 	list_for_each_entry_safe(pg, next, &l, lru)
 		__free_page(pg);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable)
block/ll_rw_blk.c
@@ -3359,12 +3359,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
fs/aio.c
@@ -777,11 +777,11 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
 static int __aio_run_iocbs(struct kioctx *ctx)
 {
 	struct kiocb *iocb;
-	LIST_HEAD(run_list);
+	struct list_head run_list;
 
 	assert_spin_locked(&ctx->ctx_lock);
 
-	list_splice_init(&ctx->run_list, &run_list);
+	list_replace_init(&ctx->run_list, &run_list);
 	while (!list_empty(&run_list)) {
 		iocb = list_entry(run_list.next, struct kiocb,
 				  ki_run_list);
kernel/timer.c
@@ -419,10 +419,10 @@ static inline void __run_timers(tvec_base_t *base)
 
 	spin_lock_irq(&base->lock);
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
-		struct list_head work_list = LIST_HEAD_INIT(work_list);
+		struct list_head work_list;
 		struct list_head *head = &work_list;
 		int index = base->timer_jiffies & TVR_MASK;
 
 		/*
 		 * Cascade timers:
 		 */
@@ -431,8 +431,8 @@ static inline void __run_timers(tvec_base_t *base)
 			(!cascade(base, &base->tv3, INDEX(1))) &&
 				!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
-		++base->timer_jiffies;
-		list_splice_init(base->tv1.vec + index, &work_list);
+		++base->timer_jiffies;
+		list_replace_init(base->tv1.vec + index, &work_list);
 		while (!list_empty(head)) {
 			void (*fn)(unsigned long);
 			unsigned long data;
kernel/workqueue.c
@@ -531,11 +531,11 @@ int current_is_keventd(void)
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	LIST_HEAD(list);
+	struct list_head list;
 	struct work_struct *work;
 
 	spin_lock_irq(&cwq->lock);
-	list_splice_init(&cwq->worklist, &list);
+	list_replace_init(&cwq->worklist, &list);
 
 	while (!list_empty(&list)) {
 		printk("Taking work for %s\n", wq->name);
net/core/dev.c
@@ -2980,7 +2980,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
-	struct list_head list = LIST_HEAD_INIT(list);
+	struct list_head list;
 
 	/* Need to guard against multiple cpu's getting out of order. */
 	mutex_lock(&net_todo_run_mutex);
@@ -2995,9 +2995,9 @@ void netdev_run_todo(void)
 
 	/* Snapshot list, allow later requests */
 	spin_lock(&net_todo_list_lock);
-	list_splice_init(&net_todo_list, &list);
+	list_replace_init(&net_todo_list, &list);
 	spin_unlock(&net_todo_list_lock);
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_entry(list.next, struct net_device, todo_list);
net/core/link_watch.c
@@ -91,11 +91,10 @@ static void rfc2863_policy(struct net_device *dev)
 /* Must be called with the rtnl semaphore held */
 void linkwatch_run_queue(void)
 {
-	LIST_HEAD(head);
-	struct list_head *n, *next;
+	struct list_head head, *n, *next;
 
 	spin_lock_irq(&lweventlist_lock);
-	list_splice_init(&lweventlist, &head);
+	list_replace_init(&lweventlist, &head);
 	spin_unlock_irq(&lweventlist_lock);
 
 	list_for_each_safe(n, next, &head) {