commit 3f7e62bba0

Commit 356fd2663c ("scsi: Set request queue runtime PM status back to
active on resume") fixed up the inconsistent runtime PM status between
the request queue and the device. However, the request queue's runtime
PM status shall be changed only on a successful resume; otherwise the
status may still be inconsistent, as below:

  Request queue: RPM_ACTIVE
  Device:        RPM_SUSPENDED

This ends up in a soft lockup, because requests can be submitted to the
underlying device while that device and the resources it requires are
not resumed. For example, after the inconsistent status above arises, an
I/O request can be submitted to the UFS device driver while a required
resource (such as the clock) is not yet resumed. This triggers the
warning WARN_ON(hba->clk_gating.state != CLKS_ON) with the call stack
below:

  ufshcd_queuecommand
  scsi_dispatch_cmd
  scsi_request_fn
  __blk_run_queue
  cfq_insert_request
  __elv_add_request
  blk_flush_plug_list
  blk_finish_plug
  jbd2_journal_commit_transaction
  kjournald2

All pending I/O requests may then hang because there is no response from
the storage host or device, a soft lockup follows, and in the end the
system may crash in many ways.

Fixes: 356fd2663c ("scsi: Set request queue runtime PM status back to active on resume")
Cc: stable@vger.kernel.org
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
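The fix is visible in scsi_dev_type_resume() below: the request queue is
switched back to RPM_ACTIVE only after both the driver's resume callback
and pm_runtime_set_active() have succeeded. A minimal sketch of the
ordering the patch enforces (error paths trimmed; the real function
follows in the file):

	err = cb(dev, pm);			/* driver's resume callback */
	if (err == 0) {				/* resume succeeded ... */
		err = pm_runtime_set_active(dev);
		if (!err && scsi_is_sdev_device(dev))
			/* ... so the queue may be marked active as well */
			blk_set_runtime_active(
				to_scsi_device(dev)->request_queue);
	}
	/* on failure, queue and device both stay suspended: consistent */
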
/*
 *	scsi_pm.c	Copyright (C) 2010 Alan Stern
 *
 *	SCSI dynamic Power Management
 *		Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
#include <linux/blk-pm.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->restore ? pm->restore(dev) : 0;
}

static int scsi_dev_type_suspend(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err;

	/* flush pending in-flight resume operations, suspend is synchronous */
	async_synchronize_full_domain(&scsi_sd_pm_domain);

	err = scsi_device_quiesce(to_scsi_device(dev));
	if (err == 0) {
		err = cb(dev, pm);
		if (err)
			scsi_device_resume(to_scsi_device(dev));
	}
	dev_dbg(dev, "scsi suspend: %d\n", err);
	return err;
}

static int scsi_dev_type_resume(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	err = cb(dev, pm);
	scsi_device_resume(to_scsi_device(dev));
	dev_dbg(dev, "scsi resume: %d\n", err);

	if (err == 0) {
		pm_runtime_disable(dev);
		err = pm_runtime_set_active(dev);
		pm_runtime_enable(dev);

		/*
		 * Forcibly set runtime PM status of request queue to "active"
		 * to make sure we can again get requests from the queue
		 * (see also blk_pm_peek_request()).
		 *
		 * The resume hook will correct runtime PM status of the disk.
		 */
		if (!err && scsi_is_sdev_device(dev)) {
			struct scsi_device *sdev = to_scsi_device(dev);

			if (sdev->request_queue->dev)
				blk_set_runtime_active(sdev->request_queue);
		}
	}

	return err;
}

static int
scsi_bus_suspend_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	int err = 0;

	if (scsi_is_sdev_device(dev)) {
		/*
		 * All the high-level SCSI drivers that implement runtime
		 * PM treat runtime suspend, system suspend, and system
		 * hibernate nearly identically. In all cases the requirements
		 * for runtime suspension are stricter.
		 */
		if (pm_runtime_suspended(dev))
			return 0;

		err = scsi_dev_type_suspend(dev, cb);
	}

	return err;
}

static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_restore);
}

static int scsi_bus_resume_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	async_func_t fn;

	if (!scsi_is_sdev_device(dev))
		fn = NULL;
	else if (cb == do_scsi_resume)
		fn = async_sdev_resume;
	else if (cb == do_scsi_thaw)
		fn = async_sdev_thaw;
	else if (cb == do_scsi_restore)
		fn = async_sdev_restore;
	else
		fn = NULL;

	if (fn) {
		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

		/*
		 * If a user has disabled async probing a likely reason
		 * is due to a storage enclosure that does not inject
		 * staggered spin-ups. For safety, make resume
		 * synchronous as well in that case.
		 */
		if (strncmp(scsi_scan_type, "async", 5) != 0)
			async_synchronize_full_domain(&scsi_sd_pm_domain);
	} else {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}
	return 0;
}

static int scsi_bus_prepare(struct device *dev)
{
	if (scsi_is_sdev_device(dev)) {
		/* sd probing uses async_schedule.  Wait until it finishes. */
		async_synchronize_full_domain(&scsi_sd_probe_domain);

	} else if (scsi_is_host_device(dev)) {
		/* Wait until async scanning is finished */
		scsi_complete_async_scans();
	}
	return 0;
}

static int scsi_bus_suspend(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare	NULL
#define scsi_bus_suspend	NULL
#define scsi_bus_resume		NULL
#define scsi_bus_freeze		NULL
#define scsi_bus_thaw		NULL
#define scsi_bus_poweroff	NULL
#define scsi_bus_restore	NULL

#endif /* CONFIG_PM_SLEEP */

static int sdev_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	struct scsi_device *sdev = to_scsi_device(dev);
	int err = 0;

	err = blk_pre_runtime_suspend(sdev->request_queue);
	if (err)
		return err;
	if (pm && pm->runtime_suspend)
		err = pm->runtime_suspend(dev);
	blk_post_runtime_suspend(sdev->request_queue, err);

	return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
	int err = 0;

	dev_dbg(dev, "scsi_runtime_suspend\n");
	if (scsi_is_sdev_device(dev))
		err = sdev_runtime_suspend(dev);

	/* Insert hooks here for targets, hosts, and transport classes */

	return err;
}

static int sdev_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	blk_pre_runtime_resume(sdev->request_queue);
	if (pm && pm->runtime_resume)
		err = pm->runtime_resume(dev);
	blk_post_runtime_resume(sdev->request_queue, err);

	return err;
}

static int scsi_runtime_resume(struct device *dev)
{
	int err = 0;

	dev_dbg(dev, "scsi_runtime_resume\n");
	if (scsi_is_sdev_device(dev))
		err = sdev_runtime_resume(dev);

	/* Insert hooks here for targets, hosts, and transport classes */

	return err;
}

static int scsi_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "scsi_runtime_idle\n");

	/* Insert hooks here for targets, hosts, and transport classes */

	if (scsi_is_sdev_device(dev)) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_autosuspend(dev);
		/* -EBUSY: the autosuspend request above already handles it */
		return -EBUSY;
	}

	return 0;
}

int scsi_autopm_get_device(struct scsi_device *sdev)
{
	int err;

	err = pm_runtime_get_sync(&sdev->sdev_gendev);
	/* -EACCES means runtime PM is disabled; treat the device as usable */
	if (err < 0 && err != -EACCES)
		pm_runtime_put_sync(&sdev->sdev_gendev);
	else
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

void scsi_autopm_put_device(struct scsi_device *sdev)
{
	pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);
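
/*
 * Illustrative caller pattern (a sketch, not part of the original file):
 * a hypothetical driver path brackets access to an sdev with the two
 * helpers above so the device stays runtime-resumed while commands are
 * issued.
 *
 *	if (scsi_autopm_get_device(sdev))
 *		return -EIO;			(hypothetical error handling)
 *	... issue commands to sdev ...
 *	scsi_autopm_put_device(sdev);		(may allow autosuspend later)
 */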

void scsi_autopm_get_target(struct scsi_target *starget)
{
	pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
	pm_runtime_put_sync(&starget->dev);
}

int scsi_autopm_get_host(struct Scsi_Host *shost)
{
	int err;

	err = pm_runtime_get_sync(&shost->shost_gendev);
	if (err < 0 && err != -EACCES)
		pm_runtime_put_sync(&shost->shost_gendev);
	else
		err = 0;
	return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
	pm_runtime_put_sync(&shost->shost_gendev);
}

const struct dev_pm_ops scsi_bus_pm_ops = {
	.prepare =		scsi_bus_prepare,
	.suspend =		scsi_bus_suspend,
	.resume =		scsi_bus_resume,
	.freeze =		scsi_bus_freeze,
	.thaw =			scsi_bus_thaw,
	.poweroff =		scsi_bus_poweroff,
	.restore =		scsi_bus_restore,
	.runtime_suspend =	scsi_runtime_suspend,
	.runtime_resume =	scsi_runtime_resume,
	.runtime_idle =		scsi_runtime_idle,
};