author	Bart Van Assche <bvanassche@acm.org>	2012-11-28 13:46:45 +0100
committer	Jens Axboe <axboe@kernel.dk>	2012-12-06 14:33:00 +0100
commit	24faf6f604efe18236bded4303009fc252913bf0 (patch)
tree	bc4d35a38c44a37ffd718089729ae59b23d50d99 /block
parent	704605711ef048a7c6ad2ec599f15d2e0baf86b2 (diff)
block: Make blk_cleanup_queue() wait until request_fn finished
Some request_fn implementations, e.g. scsi_request_fn(), unlock the queue
lock internally. This may result in multiple threads executing request_fn
for the same queue simultaneously. Keep track of the number of active
request_fn calls and make sure that blk_cleanup_queue() waits until all
active request_fn invocations have finished. A block driver may start
cleaning up resources needed by its request_fn as soon as
blk_cleanup_queue() has finished, so blk_cleanup_queue() must wait for all
outstanding request_fn invocations to finish.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reported-by: Chanho Min <chanho.min@lge.com>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
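Because the second hunk below is truncated, here is a minimal sketch of how a drain loop can consume the new request_fn_active counter. The loop shape follows __blk_drain_queue() of this era, but the elided conditions and exact control flow are an approximation, not the verbatim kernel code:

	while (true) {
		bool drain = false;

		spin_lock_irq(q->queue_lock);
		/* ... elevator/blkcg draining and queue kick elided ... */
		drain |= q->request_fn_active;	/* counter added by this patch */
		spin_unlock_irq(q->queue_lock);

		if (!drain)
			break;
		/* Back off briefly before re-checking the counter. */
		msleep(10);
	}

Note that the counter needs no atomic operations: both its writers in __blk_run_queue_uncond() and this reader run under q->queue_lock.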
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	10
1 file changed, 10 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fb2353..473015e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -309,7 +309,16 @@ inline void __blk_run_queue_uncond(struct request_queue *q)
 	if (unlikely(blk_queue_dead(q)))
 		return;
 
+	/*
+	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+	 * the queue lock internally. As a result multiple threads may be
+	 * running such a request function concurrently. Keep track of the
+	 * number of active request_fn invocations such that blk_drain_queue()
+	 * can wait until all these request_fn calls have finished.
+	 */
+	q->request_fn_active++;
 	q->request_fn(q);
+	q->request_fn_active--;
 }
 
 /**
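For context on why the counter is needed: a request_fn such as scsi_request_fn() may drop the queue lock while dispatching, which lets another thread enter __blk_run_queue_uncond() before the first invocation returns. A hedged sketch of that shape follows; dispatch_to_hardware() is a hypothetical placeholder, not a real kernel helper:

	static void example_request_fn(struct request_queue *q)
	{
		struct request *rq;

		/* Called with q->queue_lock held. */
		while ((rq = blk_fetch_request(q)) != NULL) {
			spin_unlock_irq(q->queue_lock);
			/* Lock dropped: another CPU may now enter
			 * __blk_run_queue_uncond(), so request_fn_active
			 * can legitimately exceed one here. */
			dispatch_to_hardware(rq);	/* hypothetical */
			spin_lock_irq(q->queue_lock);
		}
	}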
@@ -408,6 +417,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 			__blk_run_queue(q);
 
 		drain |= q->nr_rqs_elvpriv;
+		drain |= q->request_fn_active;
 
 		/*
 		 * Unfortunately, requests are queued at and tracked from