Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c |  2
-rw-r--r--  block/blk-core.c    | 37
-rw-r--r--  block/blk-map.c     |  2
-rw-r--r--  block/blk-merge.c   |  2
-rw-r--r--  block/cfq-iosched.c | 14
-rw-r--r--  block/elevator.c    |  3
6 files changed, 21 insertions, 39 deletions
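
Before the per-file hunks, a minimal sketch of the flag unification this patch performs (simplified; it assumes the post-patch include/linux/blk_types.h layout, with most bit positions elided, and the helper bio_is_sync() is invented here for illustration). Previously bios carried BIO_RW_* bit numbers that callers shifted by hand, while requests carried ready-made REQ_* masks; afterwards a single set of REQ_* masks serves both bio->bi_rw and rq->cmd_flags:

/* Sketch of the shared flag namespace (assumption: mirrors the
 * post-patch include/linux/blk_types.h, most bits elided). */
enum rq_flag_bits {
	__REQ_WRITE,	/* not set = read */
	/* ... */
	__REQ_SYNC,	/* sync I/O */
	/* ... */
};
#define REQ_WRITE	(1 << __REQ_WRITE)
#define REQ_SYNC	(1 << __REQ_SYNC)

/* Hypothetical helper: one mask now tests either structure. */
static inline bool bio_is_sync(struct bio *bio)
{
	return bio->bi_rw & REQ_SYNC;	/* same test works on rq->cmd_flags */
}

Since each REQ_* name is already a mask, it is OR-ed in directly (bio->bi_rw |= REQ_WRITE), unlike the old bit numbers, which needed a 1 << BIO_RW shift; shifting a mask a second time would set the wrong bit.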
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 74e4043..7c6f4a7 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -203,7 +203,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
/* initialize proxy request and queue it */
blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
- rq->cmd_flags |= REQ_RW;
+ rq->cmd_flags |= REQ_WRITE;
if (q->ordered & QUEUE_ORDERED_DO_FUA)
rq->cmd_flags |= REQ_FUA;
init_request_from_bio(rq, q->orig_bar_rq->bio);
diff --git a/block/blk-core.c b/block/blk-core.c
index dca43a3..66c3cfe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cpu = bio->bi_comp_cpu;
req->cmd_type = REQ_TYPE_FS;
- /*
- * Inherit FAILFAST from bio (for read-ahead, and explicit
- * FAILFAST). FAILFAST flags are identical for req and bio.
- */
- if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+ req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+ if (bio->bi_rw & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
- else
- req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
- if (bio_rw_flagged(bio, BIO_RW_DISCARD))
- req->cmd_flags |= REQ_DISCARD;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER))
- req->cmd_flags |= REQ_HARDBARRIER;
- if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
- req->cmd_flags |= REQ_RW_SYNC;
- if (bio_rw_flagged(bio, BIO_RW_META))
- req->cmd_flags |= REQ_RW_META;
- if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
- req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
req->__sector = bio->bi_sector;
@@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
- const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+ const bool sync = (bio->bi_rw & REQ_SYNC);
+ const bool unplug = (bio->bi_rw & REQ_UNPLUG);
const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+ if ((bio->bi_rw & REQ_HARDBARRIER) &&
(q->next_ordered == QUEUE_ORDERED_NONE)) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
@@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
spin_lock_irq(q->queue_lock);
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1259,7 @@ get_rq:
*/
rw_flags = bio_data_dir(bio);
if (sync)
- rw_flags |= REQ_RW_SYNC;
+ rw_flags |= REQ_SYNC;
/*
* Grab a free request. This might sleep but can not fail.
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}
- if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+ if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
- !blk_queue_discard(q)) {
+ if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
- rq->cmd_flags |= bio->bi_rw & REQ_RW;
+ rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
if (bio_has_data(bio)) {
rq->nr_phys_segments = bio_phys_segments(q, bio);
diff --git a/block/blk-map.c b/block/blk-map.c
index 9083cf0..c65d759 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
- bio->bi_rw |= (1 << BIO_RW);
+	bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 87e4fb7..4852475 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ new_segment:
}
if (q->dma_drain_size && q->dma_drain_needed(rq)) {
- if (rq->cmd_flags & REQ_RW)
+ if (rq->cmd_flags & REQ_WRITE)
memset(q->dma_drain_buffer, 0, q->dma_drain_size);
sg->page_link &= ~0x02;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d4edeb8..eb4086f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
*/
static inline bool cfq_bio_sync(struct bio *bio)
{
- return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}
/*
@@ -646,10 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
- if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
return rq1;
- else if ((rq2->cmd_flags & REQ_RW_META) &&
- !(rq1->cmd_flags & REQ_RW_META))
+ else if ((rq2->cmd_flags & REQ_META) &&
+ !(rq1->cmd_flags & REQ_META))
return rq2;
s1 = blk_rq_pos(rq1);
@@ -1485,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
cfqq->cfqd->rq_queued--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
- if (rq->cmd_flags & REQ_RW_META) {
+ if (rq->cmd_flags & REQ_META) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
}
@@ -3177,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
- if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
+ if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
return true;
/*
@@ -3231,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_context *cic = RQ_CIC(rq);
cfqd->rq_queued++;
- if (rq->cmd_flags & REQ_RW_META)
+ if (rq->cmd_flags & REQ_META)
cfqq->meta_pending++;
cfq_update_io_thinktime(cfqd, cic);
diff --git a/block/elevator.c b/block/elevator.c
index aa99b59..816a7c8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
/*
* Don't merge file system requests and discard requests
*/
- if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
- bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+ if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
return 0;
/*