diff options
author | Corrado Zoccolo <czoccolo@gmail.com> | 2009-11-26 10:02:57 +0100 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-11-26 10:02:57 +0100 |
commit | e459dd08f45d2aa68abb0c02f8ab045cf8a598b8 (patch) | |
tree | e5bba2c95dbbd93d2880fdc81e1ea7589625a6ed /block | |
parent | 75e7b634309ef4eabf8a93d36e58863f727fa209 (diff) | |
download | kernel_samsung_tuna-e459dd08f45d2aa68abb0c02f8ab045cf8a598b8.zip kernel_samsung_tuna-e459dd08f45d2aa68abb0c02f8ab045cf8a598b8.tar.gz kernel_samsung_tuna-e459dd08f45d2aa68abb0c02f8ab045cf8a598b8.tar.bz2 |
cfq-iosched: fix ncq detection code
CFQ's detection of queuing devices initially assumes the device can queue,
and checks whether the observed queue depth reaches a certain threshold.
However, it will reconsider this choice periodically.
Unfortunately, if the device is considered non-queuing, CFQ will force a
unit queue depth for some workloads, thus defeating the detection logic.
This leads to poor performance on queuing hardware,
since the idle window remains enabled.
Given this premise, switching to hw_tag = 0 after we have proved at
least once that the device is NCQ capable is not a good choice.
The new detection code starts in an indeterminate state, in which CFQ behaves
as if hw_tag = 1, and then, if for a long observation period we never see a
large queue depth, we switch to hw_tag = 0; otherwise we stick to hw_tag = 1,
without reconsidering it again.
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 24 |
1 file changed, 15 insertions, 9 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index c2ef5d1..47abd24 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -191,8 +191,14 @@ struct cfq_data { */ int rq_queued; int hw_tag; - int hw_tag_samples; - int rq_in_driver_peak; + /* + * hw_tag can be + * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) + * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) + * 0 => no NCQ + */ + int hw_tag_est_depth; + unsigned int hw_tag_samples; /* * idle window management @@ -2518,8 +2524,11 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) { struct cfq_queue *cfqq = cfqd->active_queue; - if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak) - cfqd->rq_in_driver_peak = rq_in_driver(cfqd); + if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth) + cfqd->hw_tag_est_depth = rq_in_driver(cfqd); + + if (cfqd->hw_tag == 1) + return; if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN) @@ -2538,13 +2547,10 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) if (cfqd->hw_tag_samples++ < 50) return; - if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN) + if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) cfqd->hw_tag = 1; else cfqd->hw_tag = 0; - - cfqd->hw_tag_samples = 0; - cfqd->rq_in_driver_peak = 0; } static void cfq_completed_request(struct request_queue *q, struct request *rq) @@ -2951,7 +2957,7 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; cfqd->cfq_latency = 1; - cfqd->hw_tag = 1; + cfqd->hw_tag = -1; cfqd->last_end_sync_rq = jiffies; return cfqd; } |