Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-04 02:06:43 +07:00
block: Move power management code into a new source file
Move the code for runtime power management from blk-core.c into the new
source file blk-pm.c. Move the corresponding declarations from
<linux/blkdev.h> into <linux/blk-pm.h>. For CONFIG_PM=n, leave out the
declarations of the functions that are not used in that mode. This patch
not only reduces the number of #ifdefs in the block layer core code but
also reduces the size of the header file <linux/blkdev.h>, and hence should
help to reduce the build time of the Linux kernel if CONFIG_PM is not
defined.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 3cfa210bf3
commit bca6b067b0
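For orientation before the diff below, here is a minimal sketch of how a legacy (single-queue) block driver is expected to consume the relocated API. Only the <linux/blk-pm.h> include and the blk_pm_runtime_init()/blk_{pre,post}_runtime_{suspend,resume}() calls come from this patch and its kernel-doc; the mydev structure, its hardware hooks and the 5-second autosuspend delay are illustrative assumptions, not part of the change.

#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

struct mydev {
        struct device *dev;
        struct request_queue *queue;
};

/* Hypothetical hardware hooks, standing in for real driver code. */
static int mydev_quiesce_hw(struct mydev *mydev);
static int mydev_wake_hw(struct mydev *mydev);

/* Called from probe, after the queue has been allocated and before any I/O. */
static void mydev_enable_runtime_pm(struct mydev *mydev)
{
        blk_pm_runtime_init(mydev->queue, mydev->dev);
        /* blk_pm_runtime_init() sets the autosuspend delay to -1; pick a real one. */
        pm_runtime_set_autosuspend_delay(mydev->dev, 5000);
        pm_runtime_allow(mydev->dev);
}

static int mydev_runtime_suspend(struct device *dev)
{
        struct mydev *mydev = dev_get_drvdata(dev);
        int err;

        err = blk_pre_runtime_suspend(mydev->queue);
        if (err)
                return err;             /* -EBUSY: requests are still pending */
        err = mydev_quiesce_hw(mydev);
        blk_post_runtime_suspend(mydev->queue, err);
        return err;
}

static int mydev_runtime_resume(struct device *dev)
{
        struct mydev *mydev = dev_get_drvdata(dev);
        int err;

        blk_pre_runtime_resume(mydev->queue);
        err = mydev_wake_hw(mydev);
        blk_post_runtime_resume(mydev->queue, err);
        return err;
}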
block/Kconfig
@@ -228,4 +228,7 @@ config BLK_MQ_RDMA
 	depends on BLOCK && INFINIBAND
 	default y
 
+config BLK_PM
+	def_bool BLOCK && PM
+
 source block/Kconfig.iosched
block/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_BLK_WBT) += blk-wbt.o
 obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
 obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
 obj-$(CONFIG_BLK_SED_OPAL)	+= sed-opal.o
+obj-$(CONFIG_BLK_PM)		+= blk-pm.o
block/blk-core.c (196 lines changed)
@@ -42,6 +42,7 @@
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-sched.h"
+#include "blk-pm.h"
 #include "blk-rq-qos.h"
 
 #ifdef CONFIG_DEBUG_FS
@@ -1726,16 +1727,6 @@ void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
 }
 EXPORT_SYMBOL_GPL(part_round_stats);
 
-#ifdef CONFIG_PM
-static void blk_pm_put_request(struct request *rq)
-{
-        if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
-                pm_runtime_mark_last_busy(rq->q->dev);
-}
-#else
-static inline void blk_pm_put_request(struct request *rq) {}
-#endif
-
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
         req_flags_t rq_flags = req->rq_flags;
@@ -3757,191 +3748,6 @@ void blk_finish_plug(struct blk_plug *plug)
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
-#ifdef CONFIG_PM
-/**
- * blk_pm_runtime_init - Block layer runtime PM initialization routine
- * @q: the queue of the device
- * @dev: the device the queue belongs to
- *
- * Description:
- *    Initialize runtime-PM-related fields for @q and start auto suspend for
- *    @dev. Drivers that want to take advantage of request-based runtime PM
- *    should call this function after @dev has been initialized, and its
- *    request queue @q has been allocated, and runtime PM for it can not happen
- *    yet(either due to disabled/forbidden or its usage_count > 0). In most
- *    cases, driver should call this function before any I/O has taken place.
- *
- *    This function takes care of setting up using auto suspend for the device,
- *    the autosuspend delay is set to -1 to make runtime suspend impossible
- *    until an updated value is either set by user or by driver. Drivers do
- *    not need to touch other autosuspend settings.
- *
- *    The block layer runtime PM is request based, so only works for drivers
- *    that use request as their IO unit instead of those directly use bio's.
- */
-void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-{
-        /* Don't enable runtime PM for blk-mq until it is ready */
-        if (q->mq_ops) {
-                pm_runtime_disable(dev);
-                return;
-        }
-
-        q->dev = dev;
-        q->rpm_status = RPM_ACTIVE;
-        pm_runtime_set_autosuspend_delay(q->dev, -1);
-        pm_runtime_use_autosuspend(q->dev);
-}
-EXPORT_SYMBOL(blk_pm_runtime_init);
-
-/**
- * blk_pre_runtime_suspend - Pre runtime suspend check
- * @q: the queue of the device
- *
- * Description:
- *    This function will check if runtime suspend is allowed for the device
- *    by examining if there are any requests pending in the queue. If there
- *    are requests pending, the device can not be runtime suspended; otherwise,
- *    the queue's status will be updated to SUSPENDING and the driver can
- *    proceed to suspend the device.
- *
- *    For the not allowed case, we mark last busy for the device so that
- *    runtime PM core will try to autosuspend it some time later.
- *
- *    This function should be called near the start of the device's
- *    runtime_suspend callback.
- *
- * Return:
- *    0		- OK to runtime suspend the device
- *    -EBUSY	- Device should not be runtime suspended
- */
-int blk_pre_runtime_suspend(struct request_queue *q)
-{
-        int ret = 0;
-
-        if (!q->dev)
-                return ret;
-
-        spin_lock_irq(q->queue_lock);
-        if (q->nr_pending) {
-                ret = -EBUSY;
-                pm_runtime_mark_last_busy(q->dev);
-        } else {
-                q->rpm_status = RPM_SUSPENDING;
-        }
-        spin_unlock_irq(q->queue_lock);
-        return ret;
-}
-EXPORT_SYMBOL(blk_pre_runtime_suspend);
-
-/**
- * blk_post_runtime_suspend - Post runtime suspend processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_suspend function
- *
- * Description:
- *    Update the queue's runtime status according to the return value of the
- *    device's runtime suspend function and mark last busy for the device so
- *    that PM core will try to auto suspend the device at a later time.
- *
- *    This function should be called near the end of the device's
- *    runtime_suspend callback.
- */
-void blk_post_runtime_suspend(struct request_queue *q, int err)
-{
-        if (!q->dev)
-                return;
-
-        spin_lock_irq(q->queue_lock);
-        if (!err) {
-                q->rpm_status = RPM_SUSPENDED;
-        } else {
-                q->rpm_status = RPM_ACTIVE;
-                pm_runtime_mark_last_busy(q->dev);
-        }
-        spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_suspend);
-
-/**
- * blk_pre_runtime_resume - Pre runtime resume processing
- * @q: the queue of the device
- *
- * Description:
- *    Update the queue's runtime status to RESUMING in preparation for the
- *    runtime resume of the device.
- *
- *    This function should be called near the start of the device's
- *    runtime_resume callback.
- */
-void blk_pre_runtime_resume(struct request_queue *q)
-{
-        if (!q->dev)
-                return;
-
-        spin_lock_irq(q->queue_lock);
-        q->rpm_status = RPM_RESUMING;
-        spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_pre_runtime_resume);
-
-/**
- * blk_post_runtime_resume - Post runtime resume processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_resume function
- *
- * Description:
- *    Update the queue's runtime status according to the return value of the
- *    device's runtime_resume function. If it is successfully resumed, process
- *    the requests that are queued into the device's queue when it is resuming
- *    and then mark last busy and initiate autosuspend for it.
- *
- *    This function should be called near the end of the device's
- *    runtime_resume callback.
- */
-void blk_post_runtime_resume(struct request_queue *q, int err)
-{
-        if (!q->dev)
-                return;
-
-        spin_lock_irq(q->queue_lock);
-        if (!err) {
-                q->rpm_status = RPM_ACTIVE;
-                __blk_run_queue(q);
-                pm_runtime_mark_last_busy(q->dev);
-                pm_request_autosuspend(q->dev);
-        } else {
-                q->rpm_status = RPM_SUSPENDED;
-        }
-        spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_resume);
-
-/**
- * blk_set_runtime_active - Force runtime status of the queue to be active
- * @q: the queue of the device
- *
- * If the device is left runtime suspended during system suspend the resume
- * hook typically resumes the device and corrects runtime status
- * accordingly. However, that does not affect the queue runtime PM status
- * which is still "suspended". This prevents processing requests from the
- * queue.
- *
- * This function can be used in driver's resume hook to correct queue
- * runtime PM status and re-enable peeking requests from the queue. It
- * should be called before first request is added to the queue.
- */
-void blk_set_runtime_active(struct request_queue *q)
-{
-        spin_lock_irq(q->queue_lock);
-        q->rpm_status = RPM_ACTIVE;
-        pm_runtime_mark_last_busy(q->dev);
-        pm_request_autosuspend(q->dev);
-        spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_set_runtime_active);
-#endif
-
 int __init blk_dev_init(void)
 {
         BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
block/blk-pm.c (new file, 188 lines)
@@ -0,0 +1,188 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start auto suspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized, and its
 *    request queue @q has been allocated, and runtime PM for it can not happen
 *    yet(either due to disabled/forbidden or its usage_count > 0). In most
 *    cases, driver should call this function before any I/O has taken place.
 *
 *    This function takes care of setting up using auto suspend for the device,
 *    the autosuspend delay is set to -1 to make runtime suspend impossible
 *    until an updated value is either set by user or by driver. Drivers do
 *    not need to touch other autosuspend settings.
 *
 *    The block layer runtime PM is request based, so only works for drivers
 *    that use request as their IO unit instead of those directly use bio's.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
        /* Don't enable runtime PM for blk-mq until it is ready */
        if (q->mq_ops) {
                pm_runtime_disable(dev);
                return;
        }

        q->dev = dev;
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_set_autosuspend_delay(q->dev, -1);
        pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device can not be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    For the not allowed case, we mark last busy for the device so that
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0		- OK to runtime suspend the device
 *    -EBUSY	- Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
        int ret = 0;

        if (!q->dev)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (q->nr_pending) {
                ret = -EBUSY;
                pm_runtime_mark_last_busy(q->dev);
        } else {
                q->rpm_status = RPM_SUSPENDING;
        }
        spin_unlock_irq(q->queue_lock);
        return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime suspend function and mark last busy for the device so
 *    that PM core will try to auto suspend the device at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
        if (!q->dev)
                return;

        spin_lock_irq(q->queue_lock);
        if (!err) {
                q->rpm_status = RPM_SUSPENDED;
        } else {
                q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
        }
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
        if (!q->dev)
                return;

        spin_lock_irq(q->queue_lock);
        q->rpm_status = RPM_RESUMING;
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_resume function. If it is successfully resumed, process
 *    the requests that are queued into the device's queue when it is resuming
 *    and then mark last busy and initiate autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
        if (!q->dev)
                return;

        spin_lock_irq(q->queue_lock);
        if (!err) {
                q->rpm_status = RPM_ACTIVE;
                __blk_run_queue(q);
                pm_runtime_mark_last_busy(q->dev);
                pm_request_autosuspend(q->dev);
        } else {
                q->rpm_status = RPM_SUSPENDED;
        }
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);

/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend the resume
 * hook typically resumes the device and corrects runtime status
 * accordingly. However, that does not affect the queue runtime PM status
 * which is still "suspended". This prevents processing requests from the
 * queue.
 *
 * This function can be used in driver's resume hook to correct queue
 * runtime PM status and re-enable peeking requests from the queue. It
 * should be called before first request is added to the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_mark_last_busy(q->dev);
        pm_request_autosuspend(q->dev);
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
block/blk-pm.h (new file, 43 lines)
@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BLOCK_BLK_PM_H_
#define _BLOCK_BLK_PM_H_

#include <linux/pm_runtime.h>

#ifdef CONFIG_PM
static inline void blk_pm_requeue_request(struct request *rq)
{
        if (rq->q->dev && !(rq->rq_flags & RQF_PM))
                rq->q->nr_pending--;
}

static inline void blk_pm_add_request(struct request_queue *q,
                                      struct request *rq)
{
        if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
                pm_request_resume(q->dev);
}

static inline void blk_pm_put_request(struct request *rq)
{
        if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
                pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq)
{
}

static inline void blk_pm_add_request(struct request_queue *q,
                                      struct request *rq)
{
}

static inline void blk_pm_put_request(struct request *rq)
{
}
#endif

#endif /* _BLOCK_BLK_PM_H_ */
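A brief note on the three inline helpers above: they keep q->nr_pending in step with the number of non-PM requests in flight and kick a runtime resume when work arrives on a suspended queue. Their call sites in the legacy request path are not modified by this patch, so they do not appear in the diff; the sketch below is schematic only, and every name other than the blk_pm_*() helpers is hypothetical.

#include "blk-pm.h"

/* Schematic bracketing of a request's life cycle in the legacy path. */
static void example_add_request(struct request_queue *q, struct request *rq)
{
        blk_pm_add_request(q, rq);      /* first pending request resumes q->dev */
        /* ... hand rq to the elevator ... */
}

static void example_requeue_request(struct request_queue *q, struct request *rq)
{
        blk_pm_requeue_request(rq);     /* undo the nr_pending accounting */
        /* ... put rq back onto the queue ... */
}

static void example_put_request(struct request *rq)
{
        blk_pm_put_request(rq);         /* last pending request marks q->dev last busy */
        /* ... release rq ... */
}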
block/elevator.c
@@ -41,6 +41,7 @@
 
 #include "blk.h"
 #include "blk-mq-sched.h"
+#include "blk-pm.h"
 #include "blk-wbt.h"
 
 static DEFINE_SPINLOCK(elv_list_lock);
@@ -557,27 +558,6 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
 }
 
-#ifdef CONFIG_PM
-static void blk_pm_requeue_request(struct request *rq)
-{
-        if (rq->q->dev && !(rq->rq_flags & RQF_PM))
-                rq->q->nr_pending--;
-}
-
-static void blk_pm_add_request(struct request_queue *q, struct request *rq)
-{
-        if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
-            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
-                pm_request_resume(q->dev);
-}
-#else
-static inline void blk_pm_requeue_request(struct request *rq) {}
-static inline void blk_pm_add_request(struct request_queue *q,
-                                      struct request *rq)
-{
-}
-#endif
-
 void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
 	/*
drivers/scsi/scsi_pm.c
@@ -8,6 +8,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/export.h>
 #include <linux/async.h>
+#include <linux/blk-pm.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
drivers/scsi/sd.c
@@ -45,6 +45,7 @@
 #include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/blk-pm.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/string_helpers.h>
drivers/scsi/sr.c
@@ -43,6 +43,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/blk-pm.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
include/linux/blk-pm.h (new file, 24 lines)
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BLK_PM_H_
#define _BLK_PM_H_

struct device;
struct request_queue;

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
                                       struct device *dev) {}
#endif

#endif /* _BLK_PM_H_ */
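This new public header is where the commit message's CONFIG_PM=n point shows up: only blk_pm_runtime_init() keeps a no-op stub, while the suspend/resume helpers are simply not declared, presumably because drivers only call them from runtime-PM callbacks that are themselves compiled out without CONFIG_PM. A hedged illustration of that pattern follows; the mydev_* names are hypothetical and the callbacks are assumed to be defined as in the earlier sketch.

#include <linux/blk-pm.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int mydev_runtime_suspend(struct device *dev);
static int mydev_runtime_resume(struct device *dev);
#endif

static const struct dev_pm_ops mydev_pm_ops = {
        /*
         * SET_RUNTIME_PM_OPS() expands to nothing when CONFIG_PM is not set,
         * so the blk_{pre,post}_runtime_*() calls inside the callbacks are
         * compiled out with them and need no !CONFIG_PM stubs in the header.
         */
        SET_RUNTIME_PM_OPS(mydev_runtime_suspend, mydev_runtime_resume, NULL)
};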
include/linux/blkdev.h
@@ -1280,29 +1280,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);
 
-/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
-                                       struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
-        return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
-
 /*
  * blk_plug permits building a queue of related requests by holding the I/O
  * fragments for a short period. This allows merging of sequential requests