path: root/block/blk-core.c
author	Tejun Heo <tj@kernel.org>	2011-12-14 00:33:37 +0100
committer	Ziyan <jaraidaniel@gmail.com>	2016-01-08 10:36:47 +0100
commit	02e1d3f46f63e0ee9c97e5d5748efbc68128dfd8 (patch)
tree	04d592cf2478ada8a4631fe790836627c5f096ee /block/blk-core.c
parent	cb316cfb3a35d948a21e34869f442b72014e837e (diff)
block: add missing blk_queue_dead() checks
blk_insert_cloned_request(), blk_execute_rq_nowait() and blk_flush_plug_list()
either didn't check whether the queue was dead or did it without holding
queue_lock.  Update them so that dead state is checked while holding
queue_lock.

AFAICS, this plugs all holes (requeue doesn't matter as the request is
transitioning atomically from in_flight to queued).

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
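For context, a minimal sketch of the pattern the patch applies at each submission
point. The helper name insert_request_if_alive() is hypothetical and not part of
the patch, and the __elv_add_request() call only stands in for whatever work the
real callers do under the lock; the point is that blk_cleanup_queue() marks the
queue dead while holding queue_lock, so the test is only race-free inside the same
critical section. (The blk_execute_rq_nowait() change mentioned above lives in
block/blk-exec.c and therefore does not appear in the blk-core.c diff below.)

/*
 * Hypothetical helper, for illustration only: test-and-act under queue_lock
 * so the queue cannot transition to dead in between.
 */
static int insert_request_if_alive(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dead(q))) {
		/* queue is being torn down; refuse new work */
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -ENODEV;
	}
	/* still under queue_lock: @q stays alive while @rq is linked to it */
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}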
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 18c2ef0..1452ee3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1720,6 +1720,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2694,6 +2698,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
+	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
 	 * this lock).
@@ -2769,6 +2781,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
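As a hedged usage sketch (hypothetical caller, not part of this patch): with the
check in place, blk_insert_cloned_request() fails with -ENODEV instead of racing
with queue teardown, so a submitter of cloned requests can simply complete the
original request with the returned error.

/* Hypothetical caller, shown only to illustrate the new -ENODEV return. */
static void submit_clone(struct request_queue *q, struct request *clone,
			 struct request *orig)
{
	int error = blk_insert_cloned_request(q, clone);

	if (error)	/* -EIO on limit mismatch, -ENODEV on a dead queue */
		blk_end_request_all(orig, error);
}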