Diffstat (limited to 'block/blk-core.c')
 block/blk-core.c | 265 +-
 1 file changed, 196 insertions(+), 69 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 7c288358a745..33c33bc99ddd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -30,6 +30,7 @@
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -159,20 +160,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
- if (unlikely(nbytes > bio->bi_size)) {
- printk(KERN_ERR "%s: want %u bytes done, %u left\n",
- __func__, nbytes, bio->bi_size);
- nbytes = bio->bi_size;
- }
-
if (unlikely(rq->cmd_flags & REQ_QUIET))
set_bit(BIO_QUIET, &bio->bi_flags);
- bio->bi_size -= nbytes;
- bio->bi_sector += (nbytes >> 9);
-
- if (bio_integrity(bio))
- bio_integrity_advance(bio, nbytes);
+ bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
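
bio_advance() is added to fs/bio.c by the same series; a minimal sketch of
the bookkeeping it folds together, matching the lines removed above (the
real helper additionally advances bi_idx and the current bvec on partial
completions):

static void bio_advance_sketch(struct bio *bio, unsigned int nbytes)
{
	/* Keep any integrity metadata in step with the data. */
	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	bio->bi_sector += nbytes >> 9;	/* bytes to 512-byte sectors */
	bio->bi_size -= nbytes;
}
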
@@ -1264,6 +1255,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+ if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+ pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
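
The matching nr_pending increment happens when a request is queued; the same
series adds a counterpart helper to block/elevator.c. A hedged reconstruction
of it (not part of this file):

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	/*
	 * The first non-PM request queued while the device is suspended
	 * (or suspending) kicks off a runtime resume.
	 */
	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED ||
	     q->rpm_status == RPM_SUSPENDING))
		pm_runtime_resume(q->dev);
}
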
/*
* queue lock must be held
*/
@@ -1274,6 +1275,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(--req->ref_count))
return;
+ blk_pm_put_request(req);
+
elv_completed_request(q, req);
/* this is a bio leak */
@@ -1597,7 +1600,7 @@ static void handle_bad_sector(struct bio *bio)
printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
bio->bi_rw,
- (unsigned long long)bio->bi_sector + bio_sectors(bio),
+ (unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
set_bit(BIO_EOF, &bio->bi_flags);
@@ -2053,6 +2056,28 @@ static void blk_account_io_done(struct request *req)
}
}
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests while the queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+ struct request *rq)
+{
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+ return NULL;
+ else
+ return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+ struct request *rq)
+{
+ return rq;
+}
+#endif
+
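
Spelled out, the gating above behaves as follows for a queue with runtime PM
enabled (q->dev set); a summary, not part of the patch:

/*
 * RPM_ACTIVE                    -> every request is dispatched
 * RPM_SUSPENDING / RPM_RESUMING -> only REQ_PM requests pass
 * RPM_SUSPENDED                 -> nothing is dispatched at all
 */
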
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
@@ -2075,6 +2100,11 @@ struct request *blk_peek_request(struct request_queue *q)
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
+
+ rq = blk_pm_peek_request(q, rq);
+ if (!rq)
+ break;
+
if (!(rq->cmd_flags & REQ_STARTED)) {
/*
* This is the first time the device driver
@@ -2253,8 +2283,7 @@ EXPORT_SYMBOL(blk_fetch_request);
**/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
- int total_bytes, bio_nbytes, next_idx = 0;
- struct bio *bio;
+ int total_bytes;
if (!req->bio)
return false;
@@ -2300,56 +2329,21 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
blk_account_io_completion(req, nr_bytes);
- total_bytes = bio_nbytes = 0;
- while ((bio = req->bio) != NULL) {
- int nbytes;
+ total_bytes = 0;
+ while (req->bio) {
+ struct bio *bio = req->bio;
+ unsigned bio_bytes = min(bio->bi_size, nr_bytes);
- if (nr_bytes >= bio->bi_size) {
+ if (bio_bytes == bio->bi_size)
req->bio = bio->bi_next;
- nbytes = bio->bi_size;
- req_bio_endio(req, bio, nbytes, error);
- next_idx = 0;
- bio_nbytes = 0;
- } else {
- int idx = bio->bi_idx + next_idx;
- if (unlikely(idx >= bio->bi_vcnt)) {
- blk_dump_rq_flags(req, "__end_that");
- printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
- __func__, idx, bio->bi_vcnt);
- break;
- }
+ req_bio_endio(req, bio, bio_bytes, error);
- nbytes = bio_iovec_idx(bio, idx)->bv_len;
- BIO_BUG_ON(nbytes > bio->bi_size);
+ total_bytes += bio_bytes;
+ nr_bytes -= bio_bytes;
- /*
- * not a complete bvec done
- */
- if (unlikely(nbytes > nr_bytes)) {
- bio_nbytes += nr_bytes;
- total_bytes += nr_bytes;
- break;
- }
-
- /*
- * advance to the next vector
- */
- next_idx++;
- bio_nbytes += nbytes;
- }
-
- total_bytes += nbytes;
- nr_bytes -= nbytes;
-
- bio = req->bio;
- if (bio) {
- /*
- * end more in this run, or just return 'not-done'
- */
- if (unlikely(nr_bytes <= 0))
- break;
- }
+ if (!nr_bytes)
+ break;
}
/*
@@ -2365,16 +2359,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
return false;
}
- /*
- * if the request wasn't completed, update state
- */
- if (bio_nbytes) {
- req_bio_endio(req, bio, bio_nbytes, error);
- bio->bi_idx += next_idx;
- bio_iovec(bio)->bv_offset += nr_bytes;
- bio_iovec(bio)->bv_len -= nr_bytes;
- }
-
req->__data_len -= total_bytes;
req->buffer = bio_data(req->bio);
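
To make the simplified completion loop above concrete, an illustrative trace
with invented numbers, for a request carrying two 4096-byte bios and a driver
reporting nr_bytes = 6144:

/*
 * iter 1: bio_bytes = min(4096, 6144) = 4096 -> whole bio finished,
 *         req->bio advances to the next bio, total_bytes = 4096
 * iter 2: bio_bytes = min(4096, 2048) = 2048 -> partial completion,
 *         req_bio_endio() advances the bio by 2048 bytes via bio_advance(),
 *         nr_bytes reaches 0 and the loop breaks with total_bytes = 6144
 * The request is not finished (req->bio != NULL), so __data_len is
 * updated and the function returns true.
 */
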
@@ -3046,6 +3030,149 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ * Initialize runtime-PM-related fields for @q and start auto suspend for
+ * @dev. Drivers that want to take advantage of request-based runtime PM
+ * should call this function after @dev has been initialized and its
+ * request queue @q has been allocated, while runtime PM for it cannot
+ * yet happen (either because it is disabled/forbidden or because its
+ * usage_count is > 0). In most cases, drivers should call this function
+ * before any I/O has taken place.
+ *
+ * This function takes care of setting up autosuspend for the device;
+ * the autosuspend delay is set to -1 to make runtime suspend impossible
+ * until an updated value is set by either the user or the driver. Drivers
+ * do not need to touch any other autosuspend settings.
+ *
+ * Block layer runtime PM is request based, so it only works for drivers
+ * that use requests as their I/O unit, not for those that use bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+ pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
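
As an illustration of the intended call site, a hypothetical driver probe
(my_probe() and my_alloc_queue() are invented names) might do:

static int my_probe(struct device *dev)
{
	struct request_queue *q = my_alloc_queue(dev);	/* hypothetical */

	if (!q)
		return -ENOMEM;

	blk_pm_runtime_init(q, dev);
	/* The delay starts at -1 (suspend impossible); pick a real one. */
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* e.g. 5 seconds */
	return 0;
}
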
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ * This function checks whether runtime suspend is allowed for the device
+ * by examining whether any requests are pending in the queue. If requests
+ * are pending, the device cannot be runtime suspended; otherwise, the
+ * queue's status is updated to SUSPENDING and the driver can proceed to
+ * suspend the device.
+ *
+ * If suspend is not allowed, we mark the device as last busy so that the
+ * runtime PM core will try to autosuspend it again later.
+ *
+ * This function should be called near the start of the device's
+ * runtime_suspend callback.
+ *
+ * Return:
+ * 0 - OK to runtime suspend the device
+ * -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ int ret = 0;
+
+ spin_lock_irq(q->queue_lock);
+ if (q->nr_pending) {
+ ret = -EBUSY;
+ pm_runtime_mark_last_busy(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDING;
+ }
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_suspend function. On failure, the status is reset to
+ * ACTIVE and the device is marked last busy so that the PM core will try
+ * to autosuspend it again later.
+ *
+ * This function should be called near the end of the device's
+ * runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_SUSPENDED;
+ } else {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ }
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
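
Putting the suspend-side helpers together, a hypothetical runtime_suspend
callback (struct my_dev and my_power_down() are invented) would follow the
pattern the kernel-doc describes:

static int my_runtime_suspend(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);	/* hypothetical */
	int err;

	err = blk_pre_runtime_suspend(md->queue);
	if (err)
		return err;	/* -EBUSY: requests still pending */

	err = my_power_down(dev);			/* invented */
	blk_post_runtime_suspend(md->queue, err);
	return err;
}
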
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ * Update the queue's runtime status to RESUMING in preparation for the
+ * runtime resume of the device.
+ *
+ * This function should be called near the start of the device's
+ * runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_RESUMING;
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_resume function. If the resume succeeded, process the
+ * requests that were queued while the device was resuming, then mark the
+ * device last busy and initiate autosuspend for it.
+ *
+ * This function should be called near the end of the device's
+ * runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_ACTIVE;
+ __blk_run_queue(q);
+ pm_runtime_mark_last_busy(q->dev);
+ pm_runtime_autosuspend(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDED;
+ }
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
+
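
And the matching hypothetical runtime_resume callback (my_power_up() is
invented, as above):

static int my_runtime_resume(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);	/* hypothetical */
	int err;

	blk_pre_runtime_resume(md->queue);
	err = my_power_up(dev);				/* invented */
	/* On success this restarts the queue and re-arms autosuspend. */
	blk_post_runtime_resume(md->queue, err);
	return err;
}
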
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *