author | Jens Axboe <jaxboe@fusionio.com> | 2011-04-16 13:51:05 +0200
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-04-16 13:51:05 +0200
commit | 49cac01e1fa74174d72adb0e872504a7fefd7c01 (patch)
tree | a1ab1974eceea3179a604413955ad8369ba715d7 /block
parent | a237c1c5bc5dc5c76a21be922dca4826f3eca8ca (diff)
block: make unplug timer trace event correspond to the schedule() unplug
It's a pretty close match to what we had before: the timer triggering
meant that nobody had unplugged the plug in due time, and in the new
scheme that corresponds closely to the schedule() unplug. It's
essentially the difference between an explicit unplug (IO unplug) and
an implicit unplug (timer unplug: we scheduled with pending IO
queued).
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
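
The explicit/implicit split maps onto the two callers of blk_flush_plug_list(). The sketch below shows those two paths roughly as they look in kernels of this era; it is reconstructed for illustration rather than quoted from this tree, so treat the exact bodies as assumptions.

```c
/* Explicit unplug: the task that plugged the queue unplugs it itself. */
void blk_finish_plug(struct blk_plug *plug)
{
	/* from_schedule == false, so block_unplug traces an explicit unplug */
	blk_flush_plug_list(plug, false);

	if (plug == current->plug)
		current->plug = NULL;
}

/*
 * Implicit unplug: the scheduler finds pending plugged IO when the task
 * blocks and flushes it on the task's behalf.
 */
static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	/* from_schedule == true, so block_unplug traces an implicit unplug */
	if (plug)
		blk_flush_plug_list(plug, true);
}
```

Since queue_unplugged() passes !from_schedule as the third tracepoint argument, a single block_unplug event can report whether the unplug was explicit, replacing the separate io/timer event pair.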
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c81210..78b7b0c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2662,17 +2662,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
@@ -2707,7 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
@@ -2728,7 +2734,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
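
For a sense of when each variant fires, here is a hypothetical submission path using the plugging API of this era; submit_batch() and its arguments are invented for the example, and submit_bio()/READ follow the interface roughly as it stood around 2.6.39.

```c
#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical helper: submit a batch of read bios under one plug. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests accumulate on the per-task plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);

	/*
	 * Explicit unplug: block_unplug fires with explicit == true.
	 * Had the task slept inside the plugged section, schedule()
	 * would have flushed the plug instead and the same event would
	 * fire with explicit == false, the case that replaces the old
	 * timer unplug.
	 */
	blk_finish_plug(&plug);
}
```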