linux_dsm_epyc7002/drivers/scsi/scsi_pm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * scsi_pm.c	Copyright (C) 2010 Alan Stern
 *
 * SCSI dynamic Power Management
 *	Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
#include <linux/blk-pm.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

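/*
 * Thin adapters that invoke the matching dev_pm_ops callback when the
 * high-level driver provides one, and quietly succeed when it does not.
 */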
static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->restore ? pm->restore(dev) : 0;
}

static int scsi_dev_type_suspend(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err;

        /* flush pending in-flight resume operations, suspend is synchronous */
        async_synchronize_full_domain(&scsi_sd_pm_domain);

        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
                err = cb(dev, pm);
                if (err)
                        scsi_device_resume(to_scsi_device(dev));
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
}

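/*
 * Run the driver's resume-side callback, unquiesce the device, and force
 * its runtime PM status (and that of its request queue) back to "active"
 * so request processing can restart after the system transition.
 */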
static int scsi_dev_type_resume(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        err = cb(dev, pm);
        scsi_device_resume(to_scsi_device(dev));
        dev_dbg(dev, "scsi resume: %d\n", err);

        if (err == 0) {
                pm_runtime_disable(dev);
                err = pm_runtime_set_active(dev);
                pm_runtime_enable(dev);

                /*
                 * Forcibly set runtime PM status of request queue to "active"
                 * to make sure we can again get requests from the queue
                 * (see also blk_pm_peek_request()).
                 *
                 * The resume hook will correct runtime PM status of the disk.
                 */
                if (!err && scsi_is_sdev_device(dev)) {
                        struct scsi_device *sdev = to_scsi_device(dev);

                        if (sdev->request_queue->dev)
                                blk_set_runtime_active(sdev->request_queue);
                }
        }

        return err;
}

static int
scsi_bus_suspend_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        int err = 0;

        if (scsi_is_sdev_device(dev)) {
                /*
                 * All the high-level SCSI drivers that implement runtime
                 * PM treat runtime suspend, system suspend, and system
                 * hibernate nearly identically. In all cases the requirements
                 * for runtime suspension are stricter.
                 */
                if (pm_runtime_suspended(dev))
                        return 0;

                err = scsi_dev_type_suspend(dev, cb);
        }

        return err;
}

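/*
 * Async wrappers: resume, thaw and restore of scsi_devices are scheduled in
 * scsi_sd_pm_domain so multiple devices can be brought back up in parallel.
 */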
static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
        scsi_dev_type_resume(dev, do_scsi_restore);
}

static int scsi_bus_resume_common(struct device *dev,
                int (*cb)(struct device *, const struct dev_pm_ops *))
{
        async_func_t fn;

        if (!scsi_is_sdev_device(dev))
                fn = NULL;
        else if (cb == do_scsi_resume)
                fn = async_sdev_resume;
        else if (cb == do_scsi_thaw)
                fn = async_sdev_thaw;
        else if (cb == do_scsi_restore)
                fn = async_sdev_restore;
        else
                fn = NULL;

        if (fn) {
                async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

                /*
                 * If a user has disabled async probing a likely reason
                 * is due to a storage enclosure that does not inject
                 * staggered spin-ups.  For safety, make resume
                 * synchronous as well in that case.
                 */
                if (strncmp(scsi_scan_type, "async", 5) != 0)
                        async_synchronize_full_domain(&scsi_sd_pm_domain);
        } else {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }
        return 0;
}

static int scsi_bus_prepare(struct device *dev)
{
        if (scsi_is_host_device(dev)) {
                /* Wait until async scanning is finished */
                scsi_complete_async_scans();
        }
        return 0;
}

static int scsi_bus_suspend(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
        return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
        return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare	NULL
#define scsi_bus_suspend	NULL
#define scsi_bus_resume		NULL
#define scsi_bus_freeze		NULL
#define scsi_bus_thaw		NULL
#define scsi_bus_poweroff	NULL
#define scsi_bus_restore	NULL

#endif /* CONFIG_PM_SLEEP */

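/*
 * Runtime PM for scsi_devices: the block layer gets a chance to veto or
 * prepare the request queue before the high-level driver's callback runs,
 * and is told the outcome afterwards so the queue's runtime status stays
 * in step with the device.
 */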
static int sdev_runtime_suspend(struct device *dev)
{
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
        int err = 0;

        err = blk_pre_runtime_suspend(sdev->request_queue);
        if (err)
                return err;
        if (pm && pm->runtime_suspend)
                err = pm->runtime_suspend(dev);
        blk_post_runtime_suspend(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_suspend\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_suspend(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

static int sdev_runtime_resume(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;

        blk_pre_runtime_resume(sdev->request_queue);
        if (pm && pm->runtime_resume)
                err = pm->runtime_resume(dev);
        blk_post_runtime_resume(sdev->request_queue, err);

        return err;
}

static int scsi_runtime_resume(struct device *dev)
{
        int err = 0;

        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
                err = sdev_runtime_resume(dev);

        /* Insert hooks here for targets, hosts, and transport classes */

        return err;
}

static int scsi_runtime_idle(struct device *dev)
{
        dev_dbg(dev, "scsi_runtime_idle\n");

        /* Insert hooks here for targets, hosts, and transport classes */

        if (scsi_is_sdev_device(dev)) {
                pm_runtime_mark_last_busy(dev);
                pm_runtime_autosuspend(dev);
                return -EBUSY;
        }

        return 0;
}

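/*
 * scsi_autopm_get_device - take a runtime PM reference on a scsi_device
 *
 * Resumes the device if necessary and pins it active.  Returns 0 on success
 * (including when runtime PM is disabled for the device), otherwise a
 * negative error code with no reference held.  Callers normally do their I/O
 * between this and a matching scsi_autopm_put_device().
 */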
int scsi_autopm_get_device(struct scsi_device *sdev)
{
        int err;

        err = pm_runtime_get_sync(&sdev->sdev_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&sdev->sdev_gendev);
        else
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

void scsi_autopm_put_device(struct scsi_device *sdev)
{
        pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);

void scsi_autopm_get_target(struct scsi_target *starget)
{
        pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
        pm_runtime_put_sync(&starget->dev);
}

int scsi_autopm_get_host(struct Scsi_Host *shost)
{
        int err;

        err = pm_runtime_get_sync(&shost->shost_gendev);
        if (err < 0 && err != -EACCES)
                pm_runtime_put_sync(&shost->shost_gendev);
        else
                err = 0;
        return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
        pm_runtime_put_sync(&shost->shost_gendev);
}

const struct dev_pm_ops scsi_bus_pm_ops = {
        .prepare = scsi_bus_prepare,
        .suspend = scsi_bus_suspend,
        .resume = scsi_bus_resume,
        .freeze = scsi_bus_freeze,
        .thaw = scsi_bus_thaw,
        .poweroff = scsi_bus_poweroff,
        .restore = scsi_bus_restore,
        .runtime_suspend = scsi_runtime_suspend,
        .runtime_resume = scsi_runtime_resume,
        .runtime_idle = scsi_runtime_idle,
};