author		Christoph Hellwig <hch@lst.de>		2010-08-07 18:17:56 +0200
committer	Jens Axboe <jaxboe@fusionio.com>	2010-08-07 18:17:56 +0200
commit		33659ebbae262228eef4e0fe990f393d1f0ed941 (patch)
tree		fcb537f09359c8dad3a6f6e16dc4319562dc42cc /block
parent		7e005f79791dcd58436c88ded4a7f5aed1b82147 (diff)
block: remove wrappers for request type/flags
Remove all the trivial wrappers for the cmd_type and cmd_flags fields in
struct request. This allows much easier grepping for different request
types instead of unwinding through macros.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
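For context, each removed wrapper was a one-line test of rq->cmd_type or rq->cmd_flags, and the open-coded form can be read directly off the '+' lines in the diff below. The following is a minimal, self-contained sketch of the before/after pattern; the struct, enum, and flag values are simplified stand-ins for illustration, not the real <linux/blkdev.h> definitions, and only the field and macro names mirror what appears in this patch.

/*
 * Simplified model of the pattern this patch removes. Not kernel code;
 * the definitions below are stand-ins so the example builds on its own.
 */
#include <stdio.h>

enum rq_cmd_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC };

#define REQ_DISCARD	(1u << 0)
#define REQ_SORTED	(1u << 1)

struct request {
	enum rq_cmd_type cmd_type;
	unsigned int cmd_flags;
};

/* Old style: a trivial wrapper hides which field is actually tested
 * (definition paraphrased from the conversions in this patch). */
#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)

int main(void)
{
	struct request rq = { .cmd_type = REQ_TYPE_FS, .cmd_flags = REQ_DISCARD };

	/* Before: grepping for REQ_TYPE_FS misses this call site. */
	if (blk_fs_request(&rq))
		printf("fs request (via wrapper)\n");

	/* After: the field test is spelled out at the call site, so a
	 * grep for REQ_TYPE_FS or REQ_DISCARD finds every user. */
	if (rq.cmd_type == REQ_TYPE_FS || (rq.cmd_flags & REQ_DISCARD))
		printf("fs or discard request (open-coded)\n");

	return 0;
}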
Diffstat (limited to 'block')
-rw-r--r--	block/blk-barrier.c	|  7
-rw-r--r--	block/blk-core.c	| 13
-rw-r--r--	block/blk-exec.c	|  2
-rw-r--r--	block/blk-merge.c	|  4
-rw-r--r--	block/blk.h		|  6
-rw-r--r--	block/cfq-iosched.c	| 19
-rw-r--r--	block/elevator.c	| 16
7 files changed, 39 insertions(+), 28 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d710c9..74e4043 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -79,7 +79,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
*
* http://thread.gmane.org/gmane.linux.kernel/537473
*/
- if (!blk_fs_request(rq))
+ if (rq->cmd_type != REQ_TYPE_FS)
return QUEUE_ORDSEQ_DRAIN;
if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -236,7 +236,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
- const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+ const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+ (rq->cmd_flags & REQ_HARDBARRIER);
if (!q->ordseq) {
if (!is_barrier)
@@ -261,7 +262,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
*/
/* Special requests are not subject to ordering rules. */
- if (!blk_fs_request(rq) &&
+ if (rq->cmd_type != REQ_TYPE_FS &&
rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
return true;
diff --git a/block/blk-core.c b/block/blk-core.c
index b4131d2..dca43a3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
- if (blk_pc_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
printk(KERN_INFO " cdb: ");
for (bit = 0; bit < BLK_MAX_CDB; bit++)
printk("%02x ", rq->cmd[bit]);
@@ -1796,7 +1796,7 @@ struct request *blk_peek_request(struct request_queue *q)
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
- if (blk_sorted_rq(rq))
+ if (rq->cmd_flags & REQ_SORTED)
elv_activate_rq(q, rq);
/*
@@ -1984,10 +1984,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* TODO: tj: This is too subtle. It would be better to let
* low level drivers do what they see fit.
*/
- if (blk_fs_request(req))
+ if (req->cmd_type == REQ_TYPE_FS)
req->errors = 0;
- if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+ if (error && req->cmd_type == REQ_TYPE_FS &&
+ !(req->cmd_flags & REQ_QUIET)) {
printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2075,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
- if (blk_fs_request(req) || blk_discard_rq(req))
+ if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
@@ -2127,7 +2128,7 @@ static void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
- if (unlikely(laptop_mode) && blk_fs_request(req))
+ if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
laptop_io_completion(&req->q->backing_dev_info);
blk_delete_timer(req);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 49557e9..e1672f1 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__elv_add_request(q, rq, where, 1);
__generic_unplug_device(q);
/* the queue is stopped so it won't be plugged+unplugged */
- if (blk_pm_resume_request(rq))
+ if (rq->cmd_type == REQ_TYPE_PM_RESUME)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5e7dc99..87e4fb7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -226,7 +226,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
{
unsigned short max_sectors;
- if (unlikely(blk_pc_request(req)))
+ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = queue_max_sectors(q);
@@ -250,7 +250,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
{
unsigned short max_sectors;
- if (unlikely(blk_pc_request(req)))
+ if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = queue_max_sectors(q);
diff --git a/block/blk.h b/block/blk.h
index 5ee3d7e..6e7dc87 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
*/
static inline int blk_do_io_stat(struct request *rq)
{
- return rq->rq_disk && blk_rq_io_stat(rq) &&
- (blk_fs_request(rq) || blk_discard_rq(rq));
+ return rq->rq_disk &&
+ (rq->cmd_flags & REQ_IO_STAT) &&
+ (rq->cmd_type == REQ_TYPE_FS ||
+ (rq->cmd_flags & REQ_DISCARD));
}
#endif
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7982b83..d4edeb8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
- if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+ if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
return rq1;
- else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+ else if ((rq2->cmd_flags & REQ_RW_META) &&
+ !(rq1->cmd_flags & REQ_RW_META))
return rq2;
s1 = blk_rq_pos(rq1);
@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
cfqq->cfqd->rq_queued--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
- if (rq_is_meta(rq)) {
+ if (rq->cmd_flags & REQ_RW_META) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
}
@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
- if (rq_is_meta(rq) && !cfqq->meta_pending)
+ if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
return true;
/*
@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_context *cic = RQ_CIC(rq);
cfqd->rq_queued++;
- if (rq_is_meta(rq))
+ if (rq->cmd_flags & REQ_RW_META)
cfqq->meta_pending++;
cfq_update_io_thinktime(cfqd, cic);
@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
unsigned long now;
now = jiffies;
- cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+ cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+ !!(rq->cmd_flags & REQ_NOIDLE));
cfq_update_hw_tag(cfqd);
@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_slice_expired(cfqd, 1);
else if (sync && cfqq_empty &&
!cfq_close_cooperator(cfqd, cfqq)) {
- cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+ cfqd->noidle_tree_requires_idle |=
+ !(rq->cmd_flags & REQ_NOIDLE);
/*
* Idling is enabled for SYNC_WORKLOAD.
* SYNC_NOIDLE_WORKLOAD idles at the end of the tree
- * only if we processed at least one !rq_noidle request
+ * only if we processed at least one !REQ_NOIDLE request
*/
if (cfqd->serving_type == SYNC_WORKLOAD
|| cfqd->noidle_tree_requires_idle
diff --git a/block/elevator.c b/block/elevator.c
index 923a913..aa99b59 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -428,7 +428,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
- if (blk_discard_rq(rq) != blk_discard_rq(pos))
+ if ((rq->cmd_flags & REQ_DISCARD) !=
+ (pos->cmd_flags & REQ_DISCARD))
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
@@ -558,7 +559,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
- if (blk_sorted_rq(rq))
+ if (rq->cmd_flags & REQ_SORTED)
elv_deactivate_rq(q, rq);
}
@@ -644,7 +645,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
break;
case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+ BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+ !(rq->cmd_flags & REQ_DISCARD));
rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
@@ -716,7 +718,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
/*
* toggle ordered color
*/
- if (blk_barrier_rq(rq))
+ if (rq->cmd_flags & REQ_HARDBARRIER)
q->ordcolor ^= 1;
/*
@@ -729,7 +731,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
* this request is scheduling boundary, update
* end_sector
*/
- if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+ if (rq->cmd_type == REQ_TYPE_FS ||
+ (rq->cmd_flags & REQ_DISCARD)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -843,7 +846,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
- if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+ if ((rq->cmd_flags & REQ_SORTED) &&
+ e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}