author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-04-26 12:57:59 -0400
---|---|---
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-04-26 13:01:32 -0400
commit | 97961ef46b9b5a6a7c918a38b898a7b3e49869f4 (patch) |
tree | 5ba59239707b467095a84e743724f29085eb8858 /drivers/block/xen-blkback |
parent | 8b6bf747d70e5bac1a34c8fd773230e1cfdd7546 (diff) |
xen/blkback: Move the plugging/unplugging to a higher level.
We used to do the plug/unplug around each submit_bio call. But that
means that if, within a stream of WRITE, WRITE, WRITE, ..., WRITE, we
have one READ, it could stall the pipeline (as submit_bio could
trigger the unplug_fnc to be called and stall/sync when doing the
READ). Instead we want to do the unplugging once the whole (or as
much as possible of the) ring buffer has been processed. This also
saves us from doing a plug/unplug for each request.
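For context, this uses the block layer's on-stack plugging API
(blk_start_plug/blk_finish_plug, merged in 2.6.39), which queues bios
on a per-task plug list and flushes them as a batch when the plug is
finished. A minimal sketch of the pattern the patch adopts; the
struct backend and the have_pending_requests()/dispatch_one_request()
helpers are hypothetical stand-ins for the blkback ring-processing
code (do_block_io_op() in the real driver):

```c
#include <linux/blkdev.h>

/* Hypothetical backend state; stands in for struct blkif_st. */
struct backend;
bool have_pending_requests(struct backend *be);
void dispatch_one_request(struct backend *be); /* ends in submit_bio() */

static void service_ring(struct backend *be)
{
	struct blk_plug plug;

	/* Queue bios on the per-task plug list instead of
	 * dispatching each one to the driver immediately... */
	blk_start_plug(&plug);

	while (have_pending_requests(be))
		dispatch_one_request(be);

	/* ...then flush the whole batch in one go. */
	blk_finish_plug(&plug);
}
```

Because the plug is now flushed once per ring pass rather than once
per request, a lone READ in a stream of WRITEs no longer forces an
early unplug of the writes queued ahead of it.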
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r-- | drivers/block/xen-blkback/blkback.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c4bc85e..ed85ba9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -276,6 +276,8 @@ int xen_blkif_schedule(void *arg)
 		printk(KERN_DEBUG "%s: started\n", current->comm);
 
 	while (!kthread_should_stop()) {
+		struct blk_plug plug;
+
 		if (try_to_freeze())
 			continue;
 		if (unlikely(vbd->size != vbd_sz(vbd)))
@@ -292,9 +294,13 @@ int xen_blkif_schedule(void *arg)
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
 
+		blk_start_plug(&plug);
+
 		if (do_block_io_op(blkif))
 			blkif->waiting_reqs = 1;
 
+		blk_finish_plug(&plug);
+
 		if (log_stats && time_after(jiffies, blkif->st_print))
 			print_stats(blkif);
 	}
@@ -547,7 +553,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i, nbio = 0;
 	int operation;
-	struct blk_plug plug;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -660,15 +665,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	 */
 	atomic_set(&pending_req->pendcnt, nbio);
 
-	/* Get a reference count for the disk queue and start sending I/O */
-	blk_start_plug(&plug);
-
 	for (i = 0; i < nbio; i++)
 		submit_bio(operation, biolist[i]);
 
-	blk_finish_plug(&plug);
-	/* Let the I/Os go.. */
-
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
 	else if (operation == WRITE || operation == WRITE_BARRIER)
```