From ada609ee2ac2e03bd8abb07f9b3e92cd2e650f19 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jan 2011 14:35:54 +0100 Subject: [PATCH 01/20] workqueue: use WQ_MEM_RECLAIM instead of WQ_RESCUER WQ_RESCUER is now an internal flag and should only be used in the workqueue implementation proper. Use WQ_MEM_RECLAIM instead. This doesn't introduce any functional difference. Signed-off-by: Tejun Heo Cc: dm-devel@redhat.com Cc: Neil Brown --- drivers/md/md.c | 2 +- fs/nfs/inode.c | 2 +- net/sunrpc/sched.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index b76cfc89e1b5..6352e84fd512 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7321,7 +7321,7 @@ static int __init md_init(void) { int ret = -ENOMEM; - md_wq = alloc_workqueue("md", WQ_RESCUER, 0); + md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); if (!md_wq) goto err_wq; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d8512423ba72..0855acdfe706 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1505,7 +1505,7 @@ static int nfsiod_start(void) { struct workqueue_struct *wq; dprintk("RPC: creating workqueue nfsiod\n"); - wq = alloc_workqueue("nfsiod", WQ_RESCUER, 0); + wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); if (wq == NULL) return -ENOMEM; nfsiod_workqueue = wq; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 243fc09b164e..2841cc6bcfda 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -908,7 +908,7 @@ static int rpciod_start(void) { /* * Create the rpciod thread and wait for it to start. */ dprintk("RPC: creating workqueue rpciod\n"); - wq = alloc_workqueue("rpciod", WQ_RESCUER, 0); + wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0); rpciod_workqueue = wq; return rpciod_workqueue != NULL; } From c48730056f69db30c075236f4ee2bc9d3f4f9985 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Jan 2011 12:12:50 +0100 Subject: [PATCH 02/20] arm/omap: use system_wq in mailbox With cmwq, there's no reason to use a separate workqueue for mailbox. Use system_wq instead. mbox->rxq->work is sync flushed in omap_mbox_fini() to make sure it's not running on any CPU, which guarantees that no mbox work is running when omap_mbox_exit() is entered. Signed-off-by: Tejun Heo Acked-by: Hari Kanigeri Cc: Tony Lindgren Cc: linux-omap@vger.kernel.org --- arch/arm/plat-omap/mailbox.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 459b319a9fad..4c9d44c4db48 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -32,7 +32,6 @@ #include -static struct workqueue_struct *mboxd; static struct omap_mbox **mboxes; static int mbox_configured; @@ -197,7 +196,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox) /* no more messages in the fifo. clear IRQ source.
*/ ack_mbox_irq(mbox, IRQ_RX); nomem: - queue_work(mboxd, &mbox->rxq->work); + schedule_work(&mbox->rxq->work); } static irqreturn_t mbox_interrupt(int irq, void *p) @@ -307,7 +306,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox) if (!--mbox->use_count) { free_irq(mbox->irq, mbox); tasklet_kill(&mbox->txq->tasklet); - flush_work(&mbox->rxq->work); + flush_work_sync(&mbox->rxq->work); mbox_queue_free(mbox->txq); mbox_queue_free(mbox->rxq); } @@ -406,10 +405,6 @@ static int __init omap_mbox_init(void) if (err) return err; - mboxd = create_workqueue("mboxd"); - if (!mboxd) - return -ENOMEM; - /* kfifo size sanity check: alignment and minimal size */ mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t)); mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, @@ -421,7 +416,6 @@ subsys_initcall(omap_mbox_init); static void __exit omap_mbox_exit(void) { - destroy_workqueue(mboxd); class_unregister(&omap_mbox_class); } module_exit(omap_mbox_exit); From bcb6d9161d1720cf68c7f4de0630e91cb95ee60c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Jan 2011 12:12:50 +0100 Subject: [PATCH 03/20] wireless/ipw2x00: use system_wq instead of dedicated workqueues With cmwq, there's no reason to use separate workqueues in ipw2x00 drivers. Drop them and use system_wq instead. All used work items are sync canceled on driver detach. Signed-off-by: Tejun Heo Acked-by: "John W. Linville" Cc: linux-wireless@vger.kernel.org --- drivers/net/wireless/ipw2x00/ipw2100.c | 70 ++++----- drivers/net/wireless/ipw2x00/ipw2100.h | 1 - drivers/net/wireless/ipw2x00/ipw2200.c | 196 +++++++++++-------------- drivers/net/wireless/ipw2x00/ipw2200.h | 2 - 4 files changed, 118 insertions(+), 151 deletions(-) diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 61915f371416..471a52a2f8d4 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv) netif_stop_queue(priv->net_dev); priv->status |= STATUS_RESET_PENDING; if (priv->reset_backoff) - queue_delayed_work(priv->workqueue, &priv->reset_work, - priv->reset_backoff * HZ); + schedule_delayed_work(&priv->reset_work, + priv->reset_backoff * HZ); else - queue_delayed_work(priv->workqueue, &priv->reset_work, - 0); + schedule_delayed_work(&priv->reset_work, 0); if (priv->reset_backoff < MAX_RESET_BACKOFF) priv->reset_backoff++; @@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv) if (priv->stop_hang_check) { priv->stop_hang_check = 0; - queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); + schedule_delayed_work(&priv->hang_check, HZ / 2); } fail_up: @@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) if (priv->stop_rf_kill) { priv->stop_rf_kill = 0; - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(HZ)); } deferred = 1; @@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) priv->status |= STATUS_ASSOCIATING; priv->connect_start = get_seconds(); - queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10); + schedule_delayed_work(&priv->wx_event_work, HZ / 10); } static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, @@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) return; if (priv->status & STATUS_SECURITY_UPDATED) - 
queue_delayed_work(priv->workqueue, &priv->security_work, 0); + schedule_delayed_work(&priv->security_work, 0); - queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); + schedule_delayed_work(&priv->wx_event_work, 0); } static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) @@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) /* Make sure the RF Kill check timer is running */ priv->stop_rf_kill = 0; cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ)); } static void send_scan_event(void *data) @@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) /* Only userspace-requested scan completion events go out immediately */ if (!priv->user_requested_scan) { if (!delayed_work_pending(&priv->scan_event_later)) - queue_delayed_work(priv->workqueue, - &priv->scan_event_later, - round_jiffies_relative(msecs_to_jiffies(4000))); + schedule_delayed_work(&priv->scan_event_later, + round_jiffies_relative(msecs_to_jiffies(4000))); } else { priv->user_requested_scan = 0; cancel_delayed_work(&priv->scan_event_later); - queue_work(priv->workqueue, &priv->scan_event_now); + schedule_work(&priv->scan_event_now); } } @@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) /* Make sure the RF_KILL check timer is running */ priv->stop_rf_kill = 0; cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(HZ)); } else schedule_reset(priv); } @@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv, IPW_DEBUG_INFO("exit\n"); } -static void ipw2100_kill_workqueue(struct ipw2100_priv *priv) +static void ipw2100_kill_works(struct ipw2100_priv *priv) { - if (priv->workqueue) { - priv->stop_rf_kill = 1; - priv->stop_hang_check = 1; - cancel_delayed_work(&priv->reset_work); - cancel_delayed_work(&priv->security_work); - cancel_delayed_work(&priv->wx_event_work); - cancel_delayed_work(&priv->hang_check); - cancel_delayed_work(&priv->rf_kill); - cancel_delayed_work(&priv->scan_event_later); - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - } + priv->stop_rf_kill = 1; + priv->stop_hang_check = 1; + cancel_delayed_work_sync(&priv->reset_work); + cancel_delayed_work_sync(&priv->security_work); + cancel_delayed_work_sync(&priv->wx_event_work); + cancel_delayed_work_sync(&priv->hang_check); + cancel_delayed_work_sync(&priv->rf_kill); + cancel_work_sync(&priv->scan_event_now); + cancel_delayed_work_sync(&priv->scan_event_later); } static int ipw2100_tx_allocate(struct ipw2100_priv *priv) @@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work) priv->last_rtc = rtc; if (!priv->stop_hang_check) - queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); + schedule_delayed_work(&priv->hang_check, HZ / 2); spin_unlock_irqrestore(&priv->low_lock, flags); } @@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work) if (rf_kill_active(priv)) { IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); if (!priv->stop_rf_kill) - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(HZ)); goto exit_unlock; } @@ -6209,8 +6203,6 @@ static struct 
net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, INIT_LIST_HEAD(&priv->fw_pend_list); INIT_STAT(&priv->fw_pend_stat); - priv->workqueue = create_workqueue(DRV_NAME); - INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); @@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, if (dev->irq) free_irq(dev->irq, priv); - ipw2100_kill_workqueue(priv); + ipw2100_kill_works(priv); /* These are safe to call even if they weren't allocated */ ipw2100_queues_free(priv); @@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) * first, then close() will crash. */ unregister_netdev(dev); - /* ipw2100_down will ensure that there is no more pending work - * in the workqueue's, so we can safely remove them now. */ - ipw2100_kill_workqueue(priv); + ipw2100_kill_works(priv); ipw2100_queues_free(priv); diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h index 838002b4881e..99cba968aa58 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/ipw2x00/ipw2100.h @@ -580,7 +580,6 @@ struct ipw2100_priv { struct tasklet_struct irq_tasklet; - struct workqueue_struct *workqueue; struct delayed_work reset_work; struct delayed_work security_work; struct delayed_work wx_event_work; diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index ae438ed80c2f..160881f234cc 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv) /* If we aren't associated, schedule turning the LED off */ if (!(priv->status & STATUS_ASSOCIATED)) - queue_delayed_work(priv->workqueue, - &priv->led_link_off, - LD_TIME_LINK_ON); + schedule_delayed_work(&priv->led_link_off, + LD_TIME_LINK_ON); } spin_unlock_irqrestore(&priv->lock, flags); @@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv) * turning the LED on (blink while unassociated) */ if (!(priv->status & STATUS_RF_KILL_MASK) && !(priv->status & STATUS_ASSOCIATED)) - queue_delayed_work(priv->workqueue, &priv->led_link_on, - LD_TIME_LINK_OFF); + schedule_delayed_work(&priv->led_link_on, + LD_TIME_LINK_OFF); } @@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv) priv->status |= STATUS_LED_ACT_ON; cancel_delayed_work(&priv->led_act_off); - queue_delayed_work(priv->workqueue, &priv->led_act_off, - LD_TIME_ACT_ON); + schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON); } else { /* Reschedule LED off for full time period */ cancel_delayed_work(&priv->led_act_off); - queue_delayed_work(priv->workqueue, &priv->led_act_off, - LD_TIME_ACT_ON); + schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON); } } @@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) if (disable_radio) { priv->status |= STATUS_RF_KILL_SW; - if (priv->workqueue) { - cancel_delayed_work(&priv->request_scan); - cancel_delayed_work(&priv->request_direct_scan); - cancel_delayed_work(&priv->request_passive_scan); - cancel_delayed_work(&priv->scan_event); - } - queue_work(priv->workqueue, &priv->down); + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->scan_event); + schedule_work(&priv->down); } else { 
priv->status &= ~STATUS_RF_KILL_SW; if (rf_kill_active(priv)) { @@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) "disabled by HW switch\n"); /* Make sure the RF_KILL check timer is running */ cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(2 * HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(2 * HZ)); } else - queue_work(priv->workqueue, &priv->up); + schedule_work(&priv->up); } return 1; @@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->scan_event); schedule_work(&priv->link_down); - queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); + schedule_delayed_work(&priv->rf_kill, 2 * HZ); handled |= IPW_INTA_BIT_RF_KILL_DONE; } @@ -2103,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) priv->status &= ~STATUS_HCMD_ACTIVE; wake_up_interruptible(&priv->wait_command_queue); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); handled |= IPW_INTA_BIT_FATAL_ERROR; } @@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); } -/* - * NOTE: This must be executed from our workqueue as it results in udelay - * being called which may corrupt the keyboard if executed on default - * workqueue - */ static void ipw_adapter_restart(void *adapter) { struct ipw_priv *priv = adapter; @@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data) IPW_DEBUG_SCAN("Scan completion watchdog resetting " "adapter after (%dms).\n", jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } else if (priv->status & STATUS_SCANNING) { IPW_DEBUG_SCAN("Scan completion watchdog aborting scan " "after (%dms).\n", jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); ipw_abort_scan(priv); - queue_delayed_work(priv->workqueue, &priv->scan_check, HZ); + schedule_delayed_work(&priv->scan_check, HZ); } } @@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) if (priv->status & STATUS_ASSOCIATING) { IPW_DEBUG_ASSOC("Disassociating while associating.\n"); - queue_work(priv->workqueue, &priv->disassociate); + schedule_work(&priv->disassociate); return; } @@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv) priv->quality = quality; - queue_delayed_work(priv->workqueue, &priv->gather_stats, - IPW_STATS_INTERVAL); + schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL); } static void ipw_bg_gather_stats(struct work_struct *work) @@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, "Aborting scan with missed beacon.\n"); - queue_work(priv->workqueue, &priv->abort_scan); + schedule_work(&priv->abort_scan); } - queue_work(priv->workqueue, &priv->disassociate); + schedule_work(&priv->disassociate); return; } @@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, if (!(priv->status & STATUS_ROAMING)) { priv->status |= STATUS_ROAMING; if (!(priv->status & STATUS_SCANNING)) - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); } return; } @@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, * channels..) 
*/ IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, "Aborting scan with missed beacon.\n"); - queue_work(priv->workqueue, &priv->abort_scan); + schedule_work(&priv->abort_scan); } IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); @@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv) /* Only userspace-requested scan completion events go out immediately */ if (!priv->user_requested_scan) { if (!delayed_work_pending(&priv->scan_event)) - queue_delayed_work(priv->workqueue, &priv->scan_event, - round_jiffies_relative(msecs_to_jiffies(4000))); + schedule_delayed_work(&priv->scan_event, + round_jiffies_relative(msecs_to_jiffies(4000))); } else { union iwreq_data wrqu; @@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DEBUG_ASSOC ("queueing adhoc check\n"); - queue_delayed_work(priv-> - workqueue, - &priv-> - adhoc_check, - le16_to_cpu(priv-> - assoc_request. - beacon_interval)); + schedule_delayed_work( + &priv->adhoc_check, + le16_to_cpu(priv-> + assoc_request. + beacon_interval)); break; } priv->status &= ~STATUS_ASSOCIATING; priv->status |= STATUS_ASSOCIATED; - queue_work(priv->workqueue, - &priv->system_config); + schedule_work(&priv->system_config); #ifdef CONFIG_IPW2200_QOS #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ @@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv, #ifdef CONFIG_IPW2200_MONITOR if (priv->ieee->iw_mode == IW_MODE_MONITOR) { priv->status |= STATUS_SCAN_FORCED; - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); break; } priv->status &= ~STATUS_SCAN_FORCED; #endif /* CONFIG_IPW2200_MONITOR */ /* Do queued direct scans first */ - if (priv->status & STATUS_DIRECT_SCAN_PENDING) { - queue_delayed_work(priv->workqueue, - &priv->request_direct_scan, 0); - } + if (priv->status & STATUS_DIRECT_SCAN_PENDING) + schedule_delayed_work(&priv->request_direct_scan, 0); if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING | STATUS_ROAMING | STATUS_DISASSOCIATING))) - queue_work(priv->workqueue, &priv->associate); + schedule_work(&priv->associate); else if (priv->status & STATUS_ROAMING) { if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) /* If a scan completed and we are in roam mode, then * the scan that completed was the one requested as a * result of entering roam... so, schedule the * roam work */ - queue_work(priv->workqueue, - &priv->roam); + schedule_work(&priv->roam); else /* Don't schedule if we aborted the scan */ priv->status &= ~STATUS_ROAMING; } else if (priv->status & STATUS_SCAN_PENDING) - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); else if (priv->config & CFG_BACKGROUND_SCAN && priv->status & STATUS_ASSOCIATED) - queue_delayed_work(priv->workqueue, - &priv->request_scan, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->request_scan, + round_jiffies_relative(HZ)); /* Send an empty event to user space. 
* We don't send the received data on the event because @@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv) /* If the pre-allocated buffer pool is dropping low, schedule to * refill it */ if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(priv->workqueue, &priv->rx_replenish); + schedule_work(&priv->rx_replenish); /* If we've added more space for the firmware to place data, tell it */ if (write != rxq->write) @@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data) return; } - queue_delayed_work(priv->workqueue, &priv->adhoc_check, - le16_to_cpu(priv->assoc_request.beacon_interval)); + schedule_delayed_work(&priv->adhoc_check, + le16_to_cpu(priv->assoc_request.beacon_interval)); } static void ipw_bg_adhoc_check(struct work_struct *work) @@ -6523,8 +6502,7 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) } else priv->status &= ~STATUS_SCAN_PENDING; - queue_delayed_work(priv->workqueue, &priv->scan_check, - IPW_SCAN_CHECK_WATCHDOG); + schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG); done: mutex_unlock(&priv->mutex); return err; @@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv, !memcmp(network->ssid, priv->assoc_network->ssid, network->ssid_len)) { - queue_work(priv->workqueue, - &priv->merge_networks); + schedule_work(&priv->merge_networks); } } @@ -7663,7 +7640,7 @@ static int ipw_associate(void *data) if (priv->status & STATUS_DISASSOCIATING) { IPW_DEBUG_ASSOC("Not attempting association (in " "disassociating)\n "); - queue_work(priv->workqueue, &priv->associate); + schedule_work(&priv->associate); return 0; } @@ -7731,12 +7708,10 @@ static int ipw_associate(void *data) if (!(priv->status & STATUS_SCANNING)) { if (!(priv->config & CFG_SPEED_SCAN)) - queue_delayed_work(priv->workqueue, - &priv->request_scan, - SCAN_INTERVAL); + schedule_delayed_work(&priv->request_scan, + SCAN_INTERVAL); else - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); } return 0; @@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev, priv->ieee->iw_mode = wrqu->mode; - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); mutex_unlock(&priv->mutex); return err; } @@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev, IPW_DEBUG_WX("Start scan\n"); - queue_delayed_work(priv->workqueue, work, 0); + schedule_delayed_work(work, 0); return 0; } @@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, #else priv->net_dev->type = ARPHRD_IEEE80211; #endif - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } ipw_set_channel(priv, parms[1]); @@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, return 0; } priv->net_dev->type = ARPHRD_ETHER; - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } mutex_unlock(&priv->mutex); return 0; @@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev, { struct ipw_priv *priv = libipw_priv(dev); IPW_DEBUG_WX("RESET\n"); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); return 0; } @@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p) memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); printk(KERN_INFO "%s: Setting MAC to %pM\n", priv->net_dev->name, priv->mac_addr); - queue_work(priv->workqueue, 
&priv->adapter_restart); + schedule_work(&priv->adapter_restart); mutex_unlock(&priv->mutex); return 0; } @@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter) if (rf_kill_active(priv)) { IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); - if (priv->workqueue) - queue_delayed_work(priv->workqueue, - &priv->rf_kill, 2 * HZ); + schedule_delayed_work(&priv->rf_kill, 2 * HZ); goto exit_unlock; } @@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter) "device\n"); /* we can not do an adapter restart while inside an irq lock */ - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } else IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " "enabled\n"); @@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv) notify_wx_assoc_event(priv); if (priv->config & CFG_BACKGROUND_SCAN) - queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); + schedule_delayed_work(&priv->request_scan, HZ); } static void ipw_bg_link_up(struct work_struct *work) @@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv) if (!(priv->status & STATUS_EXIT_PENDING)) { /* Queue up another scan... */ - queue_delayed_work(priv->workqueue, &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); } else cancel_delayed_work(&priv->scan_event); } @@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) { int ret = 0; - priv->workqueue = create_workqueue(DRV_NAME); init_waitqueue_head(&priv->wait_command_queue); init_waitqueue_head(&priv->wait_state); @@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv) IPW_WARNING("Radio Frequency Kill Switch is On:\n" "Kill switch must be turned off for " "wireless networking to work.\n"); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - 2 * HZ); + schedule_delayed_work(&priv->rf_kill, 2 * HZ); return 0; } @@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv) /* If configure to try and auto-associate, kick * off a scan. */ - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); return 0; } @@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); if (err) { IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); - goto out_destroy_workqueue; + goto out_iounmap; } SET_NETDEV_DEV(net_dev, &pdev->dev); @@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); out_release_irq: free_irq(pdev->irq, priv); - out_destroy_workqueue: - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; out_iounmap: iounmap(priv->hw_base); out_pci_release_regions: @@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) kfree(priv->cmdlog); priv->cmdlog = NULL; } - /* ipw_down will ensure that there is no more pending work - * in the workqueue's, so we can safely remove them now. 
*/ - cancel_delayed_work(&priv->adhoc_check); - cancel_delayed_work(&priv->gather_stats); - cancel_delayed_work(&priv->request_scan); - cancel_delayed_work(&priv->request_direct_scan); - cancel_delayed_work(&priv->request_passive_scan); - cancel_delayed_work(&priv->scan_event); - cancel_delayed_work(&priv->rf_kill); - cancel_delayed_work(&priv->scan_check); - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; + + /* make sure all works are inactive */ + cancel_delayed_work_sync(&priv->adhoc_check); + cancel_work_sync(&priv->associate); + cancel_work_sync(&priv->disassociate); + cancel_work_sync(&priv->system_config); + cancel_work_sync(&priv->rx_replenish); + cancel_work_sync(&priv->adapter_restart); + cancel_delayed_work_sync(&priv->rf_kill); + cancel_work_sync(&priv->up); + cancel_work_sync(&priv->down); + cancel_delayed_work_sync(&priv->request_scan); + cancel_delayed_work_sync(&priv->request_direct_scan); + cancel_delayed_work_sync(&priv->request_passive_scan); + cancel_delayed_work_sync(&priv->scan_event); + cancel_delayed_work_sync(&priv->gather_stats); + cancel_work_sync(&priv->abort_scan); + cancel_work_sync(&priv->roam); + cancel_delayed_work_sync(&priv->scan_check); + cancel_work_sync(&priv->link_up); + cancel_work_sync(&priv->link_down); + cancel_delayed_work_sync(&priv->led_link_on); + cancel_delayed_work_sync(&priv->led_link_off); + cancel_delayed_work_sync(&priv->led_act_off); + cancel_work_sync(&priv->merge_networks); /* Free MAC hash list for ADHOC */ for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { @@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev) priv->suspend_time = get_seconds() - priv->suspend_at; /* Bring the device back up */ - queue_work(priv->workqueue, &priv->up); + schedule_work(&priv->up); return 0; } diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h index d7d049c7a4fa..0441445b8bfa 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/ipw2x00/ipw2200.h @@ -1299,8 +1299,6 @@ struct ipw_priv { u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; u8 direct_scan_ssid_len; - struct workqueue_struct *workqueue; - struct delayed_work adhoc_check; struct work_struct associate; struct work_struct disassociate; From 57df5573a56322e6895451f759c19e875252817d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Jan 2011 12:12:50 +0100 Subject: [PATCH 04/20] cpufreq: use system_wq instead of dedicated workqueues With cmwq, there's no reason for cpufreq drivers to use separate workqueues. Remove the dedicated workqueues from cpufreq_conservative and cpufreq_ondemand and use system_wq instead. The work items are already sync canceled on stop, so it's already guaranteed that no work is running on module exit. 
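For readers new to the cmwq idiom, the pattern these governors now follow is worth spelling out. Below is a minimal sketch, not part of the patch itself, of a self-rearming per-CPU sampler on system_wq; all names are hypothetical stand-ins, not the actual governor symbols:

#include <linux/workqueue.h>

struct my_dbs_info {
	int cpu;
	struct delayed_work work;
};

static void my_dbs_timer(struct work_struct *work)
{
	struct my_dbs_info *info = container_of(work, struct my_dbs_info,
						work.work);

	/* ... sample load and adjust the frequency ... */

	/* rearm on the same CPU via the system workqueue */
	schedule_delayed_work_on(info->cpu, &info->work, HZ / 10);
}

static void my_dbs_start(struct my_dbs_info *info)
{
	INIT_DELAYED_WORK_DEFERRABLE(&info->work, my_dbs_timer);
	schedule_delayed_work_on(info->cpu, &info->work, HZ / 10);
}

static void my_dbs_stop(struct my_dbs_info *info)
{
	/* cancels a pending timer and waits out a running instance,
	 * even one that rearms itself */
	cancel_delayed_work_sync(&info->work);
}

Because cancel_delayed_work_sync() also handles self-rearming work items, there is no private queue left to flush or destroy at module exit.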
Signed-off-by: Tejun Heo Acked-by: Dave Jones Cc: cpufreq@vger.kernel.org --- drivers/cpufreq/cpufreq_conservative.c | 22 +++------------------- drivers/cpufreq/cpufreq_ondemand.c | 20 +++----------------- 2 files changed, 6 insertions(+), 36 deletions(-) diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 526bfbf69611..94284c8473b1 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -81,8 +81,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ */ static DEFINE_MUTEX(dbs_mutex); -static struct workqueue_struct *kconservative_wq; - static struct dbs_tuners { unsigned int sampling_rate; unsigned int sampling_down_factor; @@ -560,7 +558,7 @@ static void do_dbs_timer(struct work_struct *work) dbs_check_cpu(dbs_info); - queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); + schedule_delayed_work_on(cpu, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); } @@ -572,8 +570,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) dbs_info->enable = 1; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, - delay); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); } static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) @@ -716,25 +713,12 @@ struct cpufreq_governor cpufreq_gov_conservative = { static int __init cpufreq_gov_dbs_init(void) { - int err; - - kconservative_wq = create_workqueue("kconservative"); - if (!kconservative_wq) { - printk(KERN_ERR "Creation of kconservative failed\n"); - return -EFAULT; - } - - err = cpufreq_register_governor(&cpufreq_gov_conservative); - if (err) - destroy_workqueue(kconservative_wq); - - return err; + return cpufreq_register_governor(&cpufreq_gov_conservative); } static void __exit cpufreq_gov_dbs_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_conservative); - destroy_workqueue(kconservative_wq); } diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index c631f27a3dcc..58aa85ea5ec6 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -104,8 +104,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ */ static DEFINE_MUTEX(dbs_mutex); -static struct workqueue_struct *kondemand_wq; - static struct dbs_tuners { unsigned int sampling_rate; unsigned int up_threshold; @@ -667,7 +665,7 @@ static void do_dbs_timer(struct work_struct *work) __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->freq_lo, CPUFREQ_RELATION_H); } - queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); + schedule_delayed_work_on(cpu, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); } @@ -681,8 +679,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) dbs_info->sample_type = DBS_NORMAL_SAMPLE; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, - delay); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); } static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) @@ -814,7 +811,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, static int __init cpufreq_gov_dbs_init(void) { - int err; cputime64_t wall; u64 idle_time; int cpu = get_cpu(); @@ -838,22 +834,12 @@ static int __init cpufreq_gov_dbs_init(void) MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); } - 
kondemand_wq = create_workqueue("kondemand"); - if (!kondemand_wq) { - printk(KERN_ERR "Creation of kondemand failed\n"); - return -EFAULT; - } - err = cpufreq_register_governor(&cpufreq_gov_ondemand); - if (err) - destroy_workqueue(kondemand_wq); - - return err; + return cpufreq_register_governor(&cpufreq_gov_ondemand); } static void __exit cpufreq_gov_dbs_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_ondemand); - destroy_workqueue(kondemand_wq); } From 1c1e8646963e319132b4cf551fbfd10b364d0aed Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Jan 2011 12:12:50 +0100 Subject: [PATCH 05/20] input/tps6507x-ts: use system_wq instead of dedicated workqueue With cmwq, there's no reason to use a separate workqueue. Drop tps6507x_ts->wq and use system_wq instead. Signed-off-by: Tejun Heo Acked-by: Todd Fischer Acked-by: Dmitry Torokhov Cc: linux-input@vger.kernel.org Cc: Dan Carpenter --- drivers/input/touchscreen/tps6507x-ts.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c index c8c136cf7bbc..43031492d733 100644 --- a/drivers/input/touchscreen/tps6507x-ts.c +++ b/drivers/input/touchscreen/tps6507x-ts.c @@ -43,7 +43,6 @@ struct tps6507x_ts { struct input_dev *input_dev; struct device *dev; char phys[32]; - struct workqueue_struct *wq; struct delayed_work work; unsigned polling; /* polling is active */ struct ts_event tc; @@ -220,8 +219,8 @@ static void tps6507x_ts_handler(struct work_struct *work) poll = 1; if (poll) { - schd = queue_delayed_work(tsc->wq, &tsc->work, - msecs_to_jiffies(tsc->poll_period)); + schd = schedule_delayed_work(&tsc->work, + msecs_to_jiffies(tsc->poll_period)); if (schd) tsc->polling = 1; else { @@ -303,7 +302,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev) tsc->input_dev = input_dev; INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler); - tsc->wq = create_workqueue("TPS6507x Touchscreen"); if (init_data) { tsc->poll_period = init_data->poll_period; @@ -325,8 +323,8 @@ static int tps6507x_ts_probe(struct platform_device *pdev) if (error) goto err2; - schd = queue_delayed_work(tsc->wq, &tsc->work, - msecs_to_jiffies(tsc->poll_period)); + schd = schedule_delayed_work(&tsc->work, + msecs_to_jiffies(tsc->poll_period)); if (schd) tsc->polling = 1; @@ -341,7 +339,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev) err2: cancel_delayed_work_sync(&tsc->work); - destroy_workqueue(tsc->wq); input_free_device(input_dev); err1: kfree(tsc); @@ -357,7 +354,6 @@ static int __devexit tps6507x_ts_remove(struct platform_device *pdev) struct input_dev *input_dev = tsc->input_dev; cancel_delayed_work_sync(&tsc->work); - destroy_workqueue(tsc->wq); input_unregister_device(input_dev); From d37adaa1596246929f7ab49843fd124595506175 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Jan 2011 17:42:27 +0100 Subject: [PATCH 06/20] fs/aio: aio_wq isn't used in memory reclaim path aio_wq isn't used during memory reclaim. Convert to alloc_workqueue() without WQ_MEM_RECLAIM. It's possible to use system_wq but given that the number of work items is determined from userland and the work item may block, enforcing a strict concurrency limit would be a good idea. Also, move fput_work to system_wq so that aio_wq is used solely to throttle the max concurrency of aio work items and fput_work doesn't interact with other work items.
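The throttling mentioned above comes entirely from alloc_workqueue()'s last two arguments. A minimal sketch of the distinction, with hypothetical names rather than the aio code:

#include <linux/workqueue.h>

static struct workqueue_struct *throttled_wq;
static struct work_struct io_work;
static struct work_struct housekeeping_work;

static void io_fn(struct work_struct *work)
{
	/* userland-driven item that may block for a long time */
}

static void housekeeping_fn(struct work_struct *work)
{
	/* independent of the throttled items */
}

static int __init my_init(void)
{
	/* flags == 0: no WQ_MEM_RECLAIM rescuer;
	 * max_active == 1: at most one item from this queue runs at a time */
	throttled_wq = alloc_workqueue("throttled", 0, 1);
	if (!throttled_wq)
		return -ENOMEM;

	INIT_WORK(&io_work, io_fn);
	INIT_WORK(&housekeeping_work, housekeeping_fn);

	queue_work(throttled_wq, &io_work);	/* serialized with its peers */
	schedule_work(&housekeeping_work);	/* shared system_wq, unthrottled */
	return 0;
}

Keeping the equivalent of fput_work off the throttled queue means a burst of blocking items can never delay the unrelated work behind them.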
Signed-off-by: Tejun Heo Acked-by: Jeff Moyer Cc: Benjamin LaHaise Cc: linux-aio@kvack.org --- fs/aio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/aio.c b/fs/aio.c index fc557a3be0a9..8007bd675889 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -85,7 +85,7 @@ static int __init aio_setup(void) kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); - aio_wq = create_workqueue("aio"); + aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */ abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry)); BUG_ON(!aio_wq || !abe_pool); @@ -569,7 +569,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) spin_lock(&fput_lock); list_add(&req->ki_list, &fput_head); spin_unlock(&fput_lock); - queue_work(aio_wq, &fput_work); + schedule_work(&fput_work); } else { req->ki_filp = NULL; really_put_req(ctx, req); From 44d2588e1102b4e35022d03b7f124dd6ea013ce8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 07/20] acpi: kacpi*_wq don't need WQ_MEM_RECLAIM ACPI workqueues aren't used during memory reclaiming. Use alloc_workqueue() to create workqueues w/o rescuers. If the purpose of the separation between kacpid_wq and kacpi_notify_wq was to give notifications better response time, kacpi_notify_wq can be dropped and kacpi_wq can be created with higher @max_active. Signed-off-by: Tejun Heo Cc: Len Brown Cc: linux-acpi@vger.kernel.org --- drivers/acpi/osl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b0931818cf98..60a80cbfcdc7 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1578,9 +1578,9 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { - kacpid_wq = create_workqueue("kacpid"); - kacpi_notify_wq = create_workqueue("kacpi_notify"); - kacpi_hotplug_wq = create_workqueue("kacpi_hotplug"); + kacpid_wq = alloc_workqueue("kacpid", 0, 1); + kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); + kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1); BUG_ON(!kacpid_wq); BUG_ON(!kacpi_notify_wq); BUG_ON(!kacpi_hotplug_wq); From 52286713a9ae1c4c80d521a8990e8c3ba14118f3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 08/20] i2o: use alloc_workqueue() instead of create_workqueue() This is an identity conversion. Signed-off-by: Tejun Heo Cc: Markus Lidel --- drivers/message/i2o/driver.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index a0421efe04ca..8a5b2d8f4daf 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c @@ -84,7 +84,8 @@ int i2o_driver_register(struct i2o_driver *drv) osm_debug("Register driver %s\n", drv->name); if (drv->event) { - drv->event_queue = create_workqueue(drv->name); + drv->event_queue = alloc_workqueue(drv->name, + WQ_MEM_RECLAIM, 1); if (!drv->event_queue) { osm_err("Could not initialize event queue for driver " "%s\n", drv->name); From 51f50f815778b91c699fbcc3aac0dda891a7b795 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 09/20] misc/iwmc3200top: use system_wq instead of dedicated workqueues With cmwq, there's no reason to use separate workqueues in iwmc3200top. Drop them and use system_wq instead. The used work items are sync flushed before driver detach.
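The teardown ordering is the delicate part of conversions like this one: the interrupt source that schedules the works must be quiesced before they are flushed, or a late IRQ can requeue a work after the flush. A hedged sketch of the shape used above, with hypothetical names:

#include <linux/mmc/sdio_func.h>
#include <linux/workqueue.h>

struct my_priv {
	struct work_struct rescan_work;
	struct work_struct isr_work;
};

static void my_remove(struct sdio_func *func)
{
	struct my_priv *priv = sdio_get_drvdata(func);

	sdio_claim_host(func);
	sdio_release_irq(func);		/* no new schedule_work() after this */
	sdio_release_host(func);

	/* the works live on system_wq; flush them instead of
	 * destroying a private queue */
	flush_work_sync(&priv->rescan_work);
	flush_work_sync(&priv->isr_work);
}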
Signed-off-by: Tejun Heo Cc: Tomas Winkler --- drivers/misc/iwmc3200top/iwmc3200top.h | 4 +--- drivers/misc/iwmc3200top/main.c | 14 +++++--------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h index 740ff0738ea8..620973ed8bf9 100644 --- a/drivers/misc/iwmc3200top/iwmc3200top.h +++ b/drivers/misc/iwmc3200top/iwmc3200top.h @@ -183,9 +183,7 @@ struct iwmct_priv { u32 barker; struct iwmct_dbg dbg; - /* drivers work queue */ - struct workqueue_struct *wq; - struct workqueue_struct *bus_rescan_wq; + /* drivers work items */ struct work_struct bus_rescan_worker; struct work_struct isr_worker; diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c index c73cef2c3c5e..727af07f1fbd 100644 --- a/drivers/misc/iwmc3200top/main.c +++ b/drivers/misc/iwmc3200top/main.c @@ -89,7 +89,7 @@ static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg) switch (msg->hdr.opcode) { case OP_OPR_ALIVE: LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n"); - queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker); + schedule_work(&priv->bus_rescan_worker); break; default: LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n", @@ -360,7 +360,7 @@ static void iwmct_irq(struct sdio_func *func) /* clear the function's interrupt request bit (write 1 to clear) */ sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); - queue_work(priv->wq, &priv->isr_worker); + schedule_work(&priv->isr_worker); LOG_TRACE(priv, IRQ, "exit iwmct_irq\n"); @@ -506,10 +506,6 @@ static int iwmct_probe(struct sdio_func *func, priv->func = func; sdio_set_drvdata(func, priv); - - /* create drivers work queue */ - priv->wq = create_workqueue(DRV_NAME "_wq"); - priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq"); INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker); INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker); @@ -604,9 +600,9 @@ static void iwmct_remove(struct sdio_func *func) sdio_release_irq(func); sdio_release_host(func); - /* Safely destroy osc workqueue */ - destroy_workqueue(priv->bus_rescan_wq); - destroy_workqueue(priv->wq); + /* Make sure works are finished */ + flush_work_sync(&priv->bus_rescan_worker); + flush_work_sync(&priv->isr_worker); sdio_claim_host(func); sdio_disable_func(func); From 278274d544e6c6b02312fee59817faa6e810b03a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 10/20] scsi/be2iscsi,qla2xxx: convert to alloc_workqueue() Switch to new workqueue interface alloc_workqueue(). These are identity conversions. Signed-off-by: Tejun Heo Acked-by: Madhuranath Iyengar Cc: Jayamohan Kallickal Cc: Andrew Vasquez Cc: "James E.J. 
Bottomley" Cc: linux-scsi@vger.kernel.org --- drivers/scsi/be2iscsi/be_main.c | 2 +- drivers/scsi/qla2xxx/qla_os.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 79cefbe31367..638c72b7f94a 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4277,7 +4277,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", phba->shost->host_no); - phba->wq = create_workqueue(phba->wq_name); + phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); if (!phba->wq) { shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" "Failed to allocate work queue\n"); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c194c23ca1fb..1d0607677727 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -349,7 +349,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) "Can't create request queue\n"); goto fail; } - ha->wq = create_workqueue("qla2xxx_wq"); + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); vha->req = ha->req_q_map[req]; options |= BIT_1; for (ques = 1; ques < ha->max_rsp_queues; ques++) { From 40f38ffb72cd58452dc5afc25ca5215bb90538a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 11/20] scsi/scsi_tgt_lib: scsi_tgtd isn't used in memory reclaim path Workqueue scsi_tgtd isn't used during memory reclaim. Convert to alloc_workqueue() without WQ_MEM_RECLAIM. Signed-off-by: Tejun Heo Cc: FUJITA Tomonori Cc: "James E.J. Bottomley" Cc: linux-scsi@vger.kernel.org --- drivers/scsi/scsi_tgt_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index c399be979921..f67282058ba1 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c @@ -629,7 +629,7 @@ static int __init scsi_tgt_init(void) if (!scsi_tgt_cmd_cache) return -ENOMEM; - scsi_tgtd = create_workqueue("scsi_tgtd"); + scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1); if (!scsi_tgtd) { err = -ENOMEM; goto free_kmemcache; From fd89d5f2030ac83324330bfd0bc73abf1beadaa6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 12/20] ext4: convert to alloc_workqueue() Convert create_workqueue() to alloc_workqueue(). This is an identity conversion. Signed-off-by: Tejun Heo Cc: "Theodore Ts'o" Cc: Andreas Dilger Cc: linux-ext4@vger.kernel.org --- fs/ext4/super.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 48ce561fafac..0fcf6720af09 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3507,7 +3507,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) percpu_counter_set(&sbi->s_dirtyblocks_counter, 0); no_journal: - EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); + /* + * The maximum number of concurrent works can be high and + * concurrency isn't really necessary. Limit it to 1. 
+ */ + EXT4_SB(sb)->dio_unwritten_wq = + alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1); if (!EXT4_SB(sb)->dio_unwritten_wq) { printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); goto failed_mount_wq; From 316873c958eee302952edcadb8dc72d6d3d19d3c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 13/20] ocfs2: use system_wq instead of ocfs2_quota_wq ocfs2_quota_wq is not depended upon during memory reclaim and, with cmwq, there's no reason to use a dedicated workqueue. Drop ocfs2_quota_wq and use system_wq instead. dqi_sync_work is already sync canceled on quota disable and no further synchronization is necessary. This change makes ocfs2_quota_setup/shutdown() noops. Both functions removed. Signed-off-by: Tejun Heo Cc: Mark Fasheh Cc: Joel Becker --- fs/ocfs2/quota.h | 3 --- fs/ocfs2/quota_global.c | 27 ++++----------------------- fs/ocfs2/super.c | 7 ------- 3 files changed, 4 insertions(+), 33 deletions(-) diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 196fcb52d95d..d5ab56cbe5c5 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -114,7 +114,4 @@ int ocfs2_local_write_dquot(struct dquot *dquot); extern const struct dquot_operations ocfs2_quota_operations; extern struct quota_format_type ocfs2_quota_format; -int ocfs2_quota_setup(void); -void ocfs2_quota_shutdown(void); - #endif /* _OCFS2_QUOTA_H */ diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 4607923eb24c..a73f64166481 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -63,8 +63,6 @@ * write to gf */ -static struct workqueue_struct *ocfs2_quota_wq = NULL; - static void qsync_work_fn(struct work_struct *work); static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp) @@ -400,8 +398,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type) OCFS2_QBLK_RESERVED_SPACE; oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); - queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, - msecs_to_jiffies(oinfo->dqi_syncms)); + schedule_delayed_work(&oinfo->dqi_sync_work, + msecs_to_jiffies(oinfo->dqi_syncms)); out_err: mlog_exit(status); @@ -635,8 +633,8 @@ static void qsync_work_fn(struct work_struct *work) struct super_block *sb = oinfo->dqi_gqinode->i_sb; dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); - queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, - msecs_to_jiffies(oinfo->dqi_syncms)); + schedule_delayed_work(&oinfo->dqi_sync_work, + msecs_to_jiffies(oinfo->dqi_syncms)); } /* @@ -923,20 +921,3 @@ const struct dquot_operations ocfs2_quota_operations = { .alloc_dquot = ocfs2_alloc_dquot, .destroy_dquot = ocfs2_destroy_dquot, }; - -int ocfs2_quota_setup(void) -{ - ocfs2_quota_wq = create_workqueue("o2quot"); - if (!ocfs2_quota_wq) - return -ENOMEM; - return 0; -} - -void ocfs2_quota_shutdown(void) -{ - if (ocfs2_quota_wq) { - flush_workqueue(ocfs2_quota_wq); - destroy_workqueue(ocfs2_quota_wq); - ocfs2_quota_wq = NULL; - } -} diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 38f986d2447e..84a70113b43a 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1645,16 +1645,11 @@ static int __init ocfs2_init(void) mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); } - status = ocfs2_quota_setup(); - if (status) - goto leave; - ocfs2_set_locking_protocol(); status = register_quota_format(&ocfs2_quota_format); leave: if (status < 0) { - ocfs2_quota_shutdown(); ocfs2_free_mem_caches(); exit_ocfs2_uptodate_cache(); 
} @@ -1671,8 +1666,6 @@ static void __exit ocfs2_exit(void) { mlog_entry_void(); - ocfs2_quota_shutdown(); - if (ocfs2_wq) { flush_workqueue(ocfs2_wq); destroy_workqueue(ocfs2_wq); From 28aadf51693f56c41326ebbc795318a49011b12d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:42 +0100 Subject: [PATCH 14/20] reiserfs: make commit_wq use the default concurrency level The maximum number of concurrent work items queued on commit_wq is bound by the number of active journals. Convert to alloc_workqueue() and use the default concurrency level so that they can be processed in parallel. Signed-off-by: Tejun Heo Cc: reiserfs-devel@vger.kernel.org --- fs/reiserfs/journal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 3eea859e6990..c77514bd5776 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2876,7 +2876,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name, reiserfs_mounted_fs_count++; if (reiserfs_mounted_fs_count <= 1) { reiserfs_write_unlock(sb); - commit_wq = create_workqueue("reiserfs"); + commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0); reiserfs_write_lock(sb); } From 83e759043abe9d0291f58f2427ba12bbb0a6e4f1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:43 +0100 Subject: [PATCH 15/20] xfs: convert to alloc_workqueue() Convert from create[_singlethread]_workqueue() to alloc_workqueue(). * xfsdatad_workqueue and xfsconvertd_workqueue are identity converted. Using higher concurrency limit might be useful but given the complexity of workqueue usage in xfs, proceeding cautiously seems better. * xfs_mru_reap_wq is converted to non-ordered workqueue with max concurrency of 1 as the work items don't require any specific ordering and already have proper synchronization. It seems it was singlethreaded to save worker threads, which is no longer a concern. Signed-off-by: Tejun Heo Cc: Alex Elder Cc: xfs-masters@oss.sgi.com Cc: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_buf.c | 5 +++-- fs/xfs/xfs_mru_cache.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ac1c7e8378dd..f83a4c830a65 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -2022,11 +2022,12 @@ xfs_buf_init(void) if (!xfslogd_workqueue) goto out_free_buf_zone; - xfsdatad_workqueue = create_workqueue("xfsdatad"); + xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1); if (!xfsdatad_workqueue) goto out_destroy_xfslogd_workqueue; - xfsconvertd_workqueue = create_workqueue("xfsconvertd"); + xfsconvertd_workqueue = alloc_workqueue("xfsconvertd", + WQ_MEM_RECLAIM, 1); if (!xfsconvertd_workqueue) goto out_destroy_xfsdatad_workqueue; diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index edfa178bafb6..4aff56395732 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c @@ -309,7 +309,7 @@ xfs_mru_cache_init(void) if (!xfs_mru_elem_zone) goto out; - xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache"); + xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1); if (!xfs_mru_reap_wq) goto out_destroy_mru_elem_zone; From 61edeeed917958dce5b43134d6704451ddf421fa Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:43 +0100 Subject: [PATCH 16/20] net/9p: use system_wq instead of p9_mux_wq With cmwq, there's no reason to use a dedicated workqueue in trans_fd. Drop p9_mux_wq and use system_wq instead. 
The used work items are already sync canceled in p9_conn_destroy() and don't require further synchronization. Signed-off-by: Tejun Heo Cc: Eric Van Hensbergen Cc: Ron Minnich Cc: Latchesar Ionkov Cc: v9fs-developer@lists.sourceforge.net --- net/9p/trans_fd.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 078eb162d9bf..e9f797d24414 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -155,7 +155,6 @@ struct p9_conn { static DEFINE_SPINLOCK(p9_poll_lock); static LIST_HEAD(p9_poll_pending_list); -static struct workqueue_struct *p9_mux_wq; static struct task_struct *p9_poll_task; static void p9_mux_poll_stop(struct p9_conn *m) @@ -384,7 +383,7 @@ static void p9_read_work(struct work_struct *work) if (n & POLLIN) { P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); - queue_work(p9_mux_wq, &m->rq); + schedule_work(&m->rq); } else clear_bit(Rworksched, &m->wsched); } else @@ -497,7 +496,7 @@ static void p9_write_work(struct work_struct *work) if (n & POLLOUT) { P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); - queue_work(p9_mux_wq, &m->wq); + schedule_work(&m->wq); } else clear_bit(Wworksched, &m->wsched); } else @@ -629,7 +628,7 @@ static void p9_poll_mux(struct p9_conn *m) P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); if (!test_and_set_bit(Rworksched, &m->wsched)) { P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); - queue_work(p9_mux_wq, &m->rq); + schedule_work(&m->rq); } } @@ -639,7 +638,7 @@ static void p9_poll_mux(struct p9_conn *m) if ((m->wsize || !list_empty(&m->unsent_req_list)) && !test_and_set_bit(Wworksched, &m->wsched)) { P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); - queue_work(p9_mux_wq, &m->wq); + schedule_work(&m->wq); } } } @@ -677,7 +676,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) n = p9_fd_poll(m->client, NULL); if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) - queue_work(p9_mux_wq, &m->wq); + schedule_work(&m->wq); return 0; } @@ -1083,15 +1082,8 @@ static int p9_poll_proc(void *a) int p9_trans_fd_init(void) { - p9_mux_wq = create_workqueue("v9fs"); - if (!p9_mux_wq) { - printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); - return -ENOMEM; - } - p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll"); if (IS_ERR(p9_poll_task)) { - destroy_workqueue(p9_mux_wq); printk(KERN_WARNING "v9fs: mux: creating poll task failed\n"); return PTR_ERR(p9_poll_task); } @@ -1109,6 +1101,4 @@ void p9_trans_fd_exit(void) v9fs_unregister_trans(&p9_tcp_trans); v9fs_unregister_trans(&p9_unix_trans); v9fs_unregister_trans(&p9_fd_trans); - - destroy_workqueue(p9_mux_wq); } From aa70c585b15f64da6948bdacc7a7692addd65364 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:43 +0100 Subject: [PATCH 17/20] net/9p: replace p9_poll_task with a work Now that cmwq can handle high concurrency, it's more efficient to use a work item than a dedicated kthread. Convert p9_poll_proc() to a work function for p9_poll_work and make p9_pollwake() schedule it on each poll event. The work is sync flushed on module exit.
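Reduced to a skeleton, the kthread-to-work conversion looks as follows; the names are hypothetical and the real function detaches one connection at a time under the lock:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static void my_poll_workfn(struct work_struct *work);

static DEFINE_SPINLOCK(my_pending_lock);
static LIST_HEAD(my_pending_list);
static DECLARE_WORK(my_poll_work, my_poll_workfn);

/* waitqueue callback: replaces waking a dedicated poller task */
static int my_pollwake(void)
{
	schedule_work(&my_poll_work);	/* no-op if already queued */
	return 1;
}

static void my_poll_workfn(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&my_pending_lock, flags);
	while (!list_empty(&my_pending_list)) {
		/* detach an entry, drop the lock, service it, retake */
	}
	spin_unlock_irqrestore(&my_pending_lock, flags);
}

The sleep/wake loop disappears entirely: an empty list just means the work function returns, and the next poll event reschedules it. Module exit becomes flush_work_sync(&my_poll_work) instead of kthread_stop().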
Signed-off-by: Tejun Heo Cc: Eric Van Hensbergen Cc: Ron Minnich Cc: Latchesar Ionkov Cc: v9fs-developer@lists.sourceforge.net --- net/9p/trans_fd.c | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index e9f797d24414..a30471e51740 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -153,9 +153,11 @@ struct p9_conn { unsigned long wsched; }; +static void p9_poll_workfn(struct work_struct *work); + static DEFINE_SPINLOCK(p9_poll_lock); static LIST_HEAD(p9_poll_pending_list); -static struct task_struct *p9_poll_task; +static DECLARE_WORK(p9_poll_work, p9_poll_workfn); static void p9_mux_poll_stop(struct p9_conn *m) { @@ -515,15 +517,14 @@ static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) container_of(wait, struct p9_poll_wait, wait); struct p9_conn *m = pwait->conn; unsigned long flags; - DECLARE_WAITQUEUE(dummy_wait, p9_poll_task); spin_lock_irqsave(&p9_poll_lock, flags); if (list_empty(&m->poll_pending_link)) list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); spin_unlock_irqrestore(&p9_poll_lock, flags); - /* perform the default wake up operation */ - return default_wake_function(&dummy_wait, mode, sync, key); + schedule_work(&p9_poll_work); + return 1; } /** @@ -1046,12 +1047,12 @@ static struct p9_trans_module p9_fd_trans = { * */ -static int p9_poll_proc(void *a) +static void p9_poll_workfn(struct work_struct *work) { unsigned long flags; P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current); - repeat: + spin_lock_irqsave(&p9_poll_lock, flags); while (!list_empty(&p9_poll_pending_list)) { struct p9_conn *conn = list_first_entry(&p9_poll_pending_list, @@ -1066,28 +1067,11 @@ static int p9_poll_proc(void *a) } spin_unlock_irqrestore(&p9_poll_lock, flags); - set_current_state(TASK_INTERRUPTIBLE); - if (list_empty(&p9_poll_pending_list)) { - P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n"); - schedule(); - } - __set_current_state(TASK_RUNNING); - - if (!kthread_should_stop()) - goto repeat; - P9_DPRINTK(P9_DEBUG_TRANS, "finish\n"); - return 0; } int p9_trans_fd_init(void) { - p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll"); - if (IS_ERR(p9_poll_task)) { - printk(KERN_WARNING "v9fs: mux: creating poll task failed\n"); - return PTR_ERR(p9_poll_task); - } - v9fs_register_trans(&p9_tcp_trans); v9fs_register_trans(&p9_unix_trans); v9fs_register_trans(&p9_fd_trans); @@ -1097,7 +1081,7 @@ int p9_trans_fd_init(void) void p9_trans_fd_exit(void) { - kthread_stop(p9_poll_task); + flush_work_sync(&p9_poll_work); v9fs_unregister_trans(&p9_tcp_trans); v9fs_unregister_trans(&p9_unix_trans); v9fs_unregister_trans(&p9_fd_trans); From c534a107e8fe446202b0fab102abc015c56c0317 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Feb 2011 11:42:43 +0100 Subject: [PATCH 18/20] rds/ib: use system_wq instead of rds_ib_fmr_wq With cmwq, there's no reason to use a dedicated rds_ib_fmr_wq - it's not in the memory reclaim path and the maximum number of concurrent work items is bound by the number of devices. Drop it and use system_wq instead. This makes rds_ib_fmr_init/exit() noops. Both are removed.
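The flush-throttling idiom the driver keeps relies on schedule_delayed_work() being a cheap no-op while the work is still pending; a minimal sketch with hypothetical names:

#include <linux/workqueue.h>
#include <linux/atomic.h>

struct my_pool {
	atomic_t dirty_count;
	unsigned int max_items;
	struct delayed_work flush_worker;
};

static void my_maybe_flush(struct my_pool *pool)
{
	/*
	 * Fire opportunistically from every dirtying path: if a flush
	 * is already queued, schedule_delayed_work() returns 0 and
	 * nothing is duplicated.
	 */
	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		schedule_delayed_work(&pool->flush_worker, 10);	/* jiffies */
}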
Signed-off-by: Tejun Heo
Cc: Andy Grover
---
 net/rds/ib.c      |  9 +--------
 net/rds/ib.h      |  2 --
 net/rds/ib_rdma.c | 27 +++------------------------
 3 files changed, 4 insertions(+), 34 deletions(-)

diff --git a/net/rds/ib.c b/net/rds/ib.c
index 4123967d4d65..cce19f95c624 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -364,7 +364,6 @@ void rds_ib_exit(void)
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
-	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -400,13 +399,9 @@ int rds_ib_init(void)
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = rds_ib_fmr_init();
-	if (ret)
-		goto out;
-
 	ret = ib_register_client(&rds_ib_client);
 	if (ret)
-		goto out_fmr_exit;
+		goto out;
 
 	ret = rds_ib_sysctl_init();
 	if (ret)
@@ -430,8 +425,6 @@ int rds_ib_init(void)
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
-out_fmr_exit:
-	rds_ib_fmr_exit();
 out:
 	return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index e34ad032b66d..4297d92788dc 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -307,8 +307,6 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
-int rds_ib_fmr_init(void);
-void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 18a833c450c8..819c35a0d9cb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,8 +38,6 @@
 #include "ib.h"
 #include "xlist.h"
 
-static struct workqueue_struct *rds_ib_fmr_wq;
-
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
@@ -307,7 +305,7 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 	int err = 0, iter = 0;
 
 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
@@ -696,24 +694,6 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
 	return ret;
 }
 
-int rds_ib_fmr_init(void)
-{
-	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
-	if (!rds_ib_fmr_wq)
-		return -ENOMEM;
-	return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
-	destroy_workqueue(rds_ib_fmr_wq);
-}
-
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -741,7 +721,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -749,8 +729,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time.
			 */
-			queue_delayed_work(rds_ib_fmr_wq,
-					   &pool->flush_worker, 10);
+			schedule_delayed_work(&pool->flush_worker, 10);
 		}
 	}

From 4149efb22da66e326fc48baf80d628834509f7f0 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 8 Feb 2011 10:39:03 +0100
Subject: [PATCH 19/20] workqueue: add system_freezeable_wq

Add a system-wide freezeable workqueue.

Signed-off-by: Tejun Heo
Acked-by: Dmitry Torokhov
Cc: "Rafael J. Wysocki"
---
 include/linux/workqueue.h | 4 ++++
 kernel/workqueue.c        | 6 +++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1ac11586a2f5..de6a755befac 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -286,11 +286,15 @@ enum {
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
  * resources are available.
+ *
+ * system_freezeable_wq is equivalent to system_wq except that it's
+ * freezeable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_freezeable_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11869faa6819..28f8bd08f0e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -249,10 +249,12 @@ struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezeable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezeable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -3764,8 +3766,10 @@ static int __init init_workqueues(void)
 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
+	system_freezeable_wq = alloc_workqueue("events_freezeable",
+					       WQ_FREEZEABLE, 0);
 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-	       !system_unbound_wq);
+	       !system_unbound_wq || !system_freezeable_wq);
 	return 0;
 }
 early_initcall(init_workqueues);

From 24d51add7438f9696a7205927bf9de3c5c787a58 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Mon, 21 Feb 2011 09:52:50 +0100
Subject: [PATCH 20/20] workqueue: fix build failure introduced by
 s/freezeable/freezable/

wq:fixes-2.6.38 does s/WQ_FREEZEABLE/WQ_FREEZABLE and wq:for-2.6.39 adds a new usage of the flag. The combination of the two creates a build failure after the merge. Fix it by renaming all freezeables to freezables.

Signed-off-by: Tejun Heo
Reported-by: Stephen Rothwell
---
 include/linux/workqueue.h |  6 +++---
 kernel/workqueue.c        | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d110cc4f9fed..f584aba78ca9 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -287,14 +287,14 @@ enum {
  * executed immediately as long as max_active limit is not reached and
  * resources are available.
  *
- * system_freezeable_wq is equivalent to system_wq except that it's
- * freezeable.
+ * system_freezable_wq is equivalent to system_wq except that it's
+ * freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
-extern struct workqueue_struct *system_freezeable_wq;
+extern struct workqueue_struct *system_freezable_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 572f559f6cb9..1b64d225f067 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,12 +251,12 @@ struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
-struct workqueue_struct *system_freezeable_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
-EXPORT_SYMBOL_GPL(system_freezeable_wq);
+EXPORT_SYMBOL_GPL(system_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -3777,10 +3777,10 @@ static int __init init_workqueues(void)
 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
-	system_freezeable_wq = alloc_workqueue("events_freezeable",
-					       WQ_FREEZEABLE, 0);
+	system_freezable_wq = alloc_workqueue("events_freezable",
+					      WQ_FREEZABLE, 0);
 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-	       !system_unbound_wq || !system_freezeable_wq);
+	       !system_unbound_wq || !system_freezable_wq);
 	return 0;
 }
 early_initcall(init_workqueues);
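Taken together, patches 19-20 give drivers a freezable counterpart to system_wq: work queued there is frozen along with user space during suspend and thawed on resume. A minimal usage sketch with a hypothetical driver (the mydrv_* names are illustrative; system_freezable_wq is the only symbol this series actually adds):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void mydrv_poll_workfn(struct work_struct *work)
	{
		/* Touches the device; must not run while the device is suspended. */
	}

	static DECLARE_DELAYED_WORK(mydrv_poll_work, mydrv_poll_workfn);

	static int __init mydrv_init(void)
	{
		/*
		 * On system_freezable_wq the work stops at the freeze point of
		 * suspend and restarts on resume, so it never races with the
		 * device being powered down. On system_wq it would keep firing.
		 */
		queue_delayed_work(system_freezable_wq, &mydrv_poll_work, HZ);
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		cancel_delayed_work_sync(&mydrv_poll_work);
	}

	module_init(mydrv_init);
	module_exit(mydrv_exit);
	MODULE_LICENSE("GPL");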