author     Sergey Senozhatsky <sergey.senozhatsky@gmail.com>  2014-04-07 15:38:01 -0700
committer  Andreas Blaesius <skate4life@gmx.de>  2016-06-05 21:21:44 +0200
commit     8d34c7612cbd9b98b7e4b75561c6f33c22935b06 (patch)
tree       abc88f989da77242767852be6562a89974baa722 /drivers/block/zram
parent     bfeb197ca636331f8d7218eeb47cb6e98c076a82 (diff)
zram: do not pass rw argument to __zram_make_request()
Do not pass the rw argument down the __zram_make_request() -> zram_bvec_rw() chain; decode it in zram_bvec_rw() instead. Besides, this is the place where READ and WRITE bio data directions are distinguished, so account zram RW stats here instead of in __zram_make_request(). This also makes it possible to account the real number of zram READ/WRITE operations, not just requests (a single RW request may cause a number of zram RW ops, each with separate locking, compression/decompression, etc.).

Change-Id: Ibc8aa078d076ea84fb952eac6877ac48493e7288
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
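For reference, this is roughly how zram_bvec_rw() reads once the hunk below is applied (reconstructed from the diff for readability, not standalone buildable code): the data direction is decoded from the bio itself via bio_data_dir(), and the per-direction counter is bumped right next to the read/write call, so every sub-operation of a split request is counted.

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	/* The caller no longer passes rw; derive it from the bio. */
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		/* Count each read op here, not once per request. */
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		/* Count each write op here, not once per request. */
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	return ret;
}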
Diffstat (limited to 'drivers/block/zram')
-rw-r--r--  drivers/block/zram/zram_drv.c  32
1 file changed, 13 insertions(+), 19 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 254dc3e..813f37e 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -544,14 +544,18 @@ out:
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, struct bio *bio, int rw)
+ int offset, struct bio *bio)
{
int ret;
+ int rw = bio_data_dir(bio);
- if (rw == READ)
+ if (rw == READ) {
+ atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
- else
+ } else {
+ atomic64_inc(&zram->stats.num_writes);
ret = zram_bvec_write(zram, bvec, index, offset);
+ }
return ret;
}
@@ -683,22 +687,13 @@ out:
return ret;
}
-static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+static void __zram_make_request(struct zram *zram, struct bio *bio)
{
int i, offset;
u32 index;
struct bio_vec *bvec;
- switch (rw) {
- case READ:
- atomic64_inc(&zram->stats.num_reads);
- break;
- case WRITE:
- atomic64_inc(&zram->stats.num_writes);
- break;
- }
-
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, i) {
@@ -715,16 +710,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec->bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
goto out;
bv.bv_len = bvec->bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
- < 0)
+ if (zram_bvec_rw(zram, bvec, index, offset, bio) < 0)
goto out;
update_position(&index, &offset, bvec);
@@ -754,7 +748,7 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
goto error;
}
- __zram_make_request(zram, bio, bio_data_dir(bio));
+ __zram_make_request(zram, bio);
up_read(&zram->init_lock);
return 0;