aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTatyana Brokhman <tlinder@codeaurora.org>2012-12-04 16:04:15 +0200
committerPawit Pornkitprasan <p.pawit@gmail.com>2013-02-09 14:17:26 +0700
commit80468a9a39033524ea8df3394690f76e72de51f0 (patch)
treecfd9782f7c25201ce095158e72cfed538d67ceb9 /block
parentab8ebd66d16b2e79d13ba756d63461e52a0300bc (diff)
downloadkernel_samsung_aries-80468a9a39033524ea8df3394690f76e72de51f0.zip
kernel_samsung_aries-80468a9a39033524ea8df3394690f76e72de51f0.tar.gz
kernel_samsung_aries-80468a9a39033524ea8df3394690f76e72de51f0.tar.bz2
block: Add API for urgent request handling
This patch adds support in the block & elevator layers for handling urgent requests. The decision whether a request is urgent or not is taken by the scheduler. Urgent request notification is passed to the underlying block device driver (eMMC for example). The block device driver may decide to interrupt the currently running low priority request to serve the new urgent request. By doing so READ latency is greatly reduced in read&write collision scenarios. Note that if the current scheduler doesn't implement the urgent request mechanism, this code path is never activated. Change-Id: I8aa74b9b45c0d3a2221bd4e82ea76eb4103e7cfa Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org> Signed-off-by: franciscofranco <franciscofranco.1990@gmail.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c26
-rw-r--r--block/blk-settings.c12
-rw-r--r--block/blk.h11
-rw-r--r--block/elevator.c5
4 files changed, 52 insertions, 2 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 160f31d..9a7b169 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -296,13 +296,26 @@ EXPORT_SYMBOL(blk_sync_queue);
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
+ * Device driver will be notified of an urgent request
+ * pending under the following conditions:
+ * 1. The driver and the current scheduler support urgent request handling
+ * 2. There is an urgent request pending in the scheduler
+ * 3. There isn't already an urgent request in flight, meaning previously
+ * notified urgent request completed (!q->notified_urgent)
*/
void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
- q->request_fn(q);
+ if (!q->notified_urgent &&
+ q->elevator->elevator_type->ops.elevator_is_urgent_fn &&
+ q->urgent_request_fn &&
+ q->elevator->elevator_type->ops.elevator_is_urgent_fn(q)) {
+ q->notified_urgent = true;
+ q->urgent_request_fn(q);
+ } else
+ q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -2013,8 +2026,17 @@ struct request *blk_fetch_request(struct request_queue *q)
struct request *rq;
rq = blk_peek_request(q);
- if (rq)
+ if (rq) {
+ /*
+ * Assumption: the next request fetched from scheduler after we
+ * notified "urgent request pending" - will be the urgent one
+ */
+ if (q->notified_urgent && !q->dispatched_urgent) {
+ q->dispatched_urgent = true;
+ (void)blk_mark_rq_urgent(rq);
+ }
blk_start_request(rq);
+ }
return rq;
}
EXPORT_SYMBOL(blk_fetch_request);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index fa1eb04..7d3ee7f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -100,6 +100,18 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
+ * blk_urgent_request() - Set an urgent_request handler function for queue
+ * @q: queue
+ * @fn: handler for urgent requests
+ *
+ */
+void blk_urgent_request(struct request_queue *q, request_fn_proc *fn)
+{
+ q->urgent_request_fn = fn;
+}
+EXPORT_SYMBOL(blk_urgent_request);
+
+/**
* blk_set_default_limits - reset limits to default values
* @lim: the queue_limits structure to reset
*
diff --git a/block/blk.h b/block/blk.h
index d658628..2285f51 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -28,6 +28,7 @@ void __generic_unplug_device(struct request_queue *);
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
+ REQ_ATOM_URGENT = 1,
};
/*
@@ -44,6 +45,16 @@ static inline void blk_clear_rq_complete(struct request *rq)
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
+static inline int blk_mark_rq_urgent(struct request *rq)
+{
+ return test_and_set_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_urgent(struct request *rq)
+{
+ clear_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
+}
+
/*
* Internal elevator interface
*/
diff --git a/block/elevator.c b/block/elevator.c
index 01075fe..b328d8a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -845,6 +845,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
+ if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) {
+ q->notified_urgent = false;
+ q->dispatched_urgent = false;
+ blk_clear_rq_urgent(rq);
+ }
/*
* request is released from the driver, io must be done
*/