author      Kent Overstreet <koverstreet@google.com>  2012-09-06 15:35:00 -0700
committer   Jens Axboe <axboe@kernel.dk>  2012-09-09 10:35:39 +0200
commit      4254bba17d92d53822a56ebc2a0c1eb7e2a71155 (patch)
tree        77668d39949a0751c5e9ef8d0fb47b96392e84e5 /fs/bio.c
parent      ccc5c9ca6aac08218b1ba52afd07a1a9864c8c5d (diff)
block: Kill bi_destructor
Now that we've got generic code for freeing bios allocated from bio pools,
this isn't needed anymore.

This patch also makes bio_free() static, since without bi_destructor there
should be no need for it to be called anywhere else.

bio_free() is now only called from bio_put, so we can refactor those a bit -
move some code from bio_put() to bio_free() and kill the redundant
bio->bi_next = NULL.

v5: Switch to BIO_KMALLOC_POOL ((void *)~0), per Boaz
v6: BIO_KMALLOC_POOL now NULL, drop bio_free's EXPORT_SYMBOL
v7: No #define BIO_KMALLOC_POOL anymore

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
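As a caller-side illustration (not part of this patch; the helper name and GFP flags below are made up for the example), the two release paths that the new static bio_free() selects via bio->bi_pool look like this:

	#include <linux/bio.h>
	#include <linux/gfp.h>

	/*
	 * Illustrative sketch only: a bio from bio_alloc() carries bi_pool and is
	 * returned to that bio_set's mempool on its last put, while a bio from
	 * bio_kmalloc() has bi_pool == NULL and is simply kfree()'d.  In both
	 * cases the caller just drops its reference; no destructor is installed.
	 */
	static void bio_free_paths_example(void)
	{
		struct bio *pooled = bio_alloc(GFP_NOIO, 1);	/* bi_pool points at fs_bio_set */
		struct bio *heap = bio_kmalloc(GFP_NOIO, 1);	/* bi_pool left NULL */

		if (pooled)
			bio_put(pooled);	/* last put: bio_free() -> mempool_free() */
		if (heap)
			bio_put(heap);		/* last put: bio_free() -> kfree() */
	}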
Diffstat (limited to 'fs/bio.c')
-rw-r--r--  fs/bio.c  64
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 919ee9a..736ef12 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -233,26 +233,37 @@ fallback:
 	return bvl;
 }
 
-void bio_free(struct bio *bio, struct bio_set *bs)
+static void __bio_free(struct bio *bio)
 {
-	void *p;
-
-	if (bio_has_allocated_vec(bio))
-		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+	bio_disassociate_task(bio);
 
 	if (bio_integrity(bio))
 		bio_integrity_free(bio);
+}
 
-	/*
-	 * If we have front padding, adjust the bio pointer before freeing
-	 */
-	p = bio;
-	if (bs->front_pad)
+static void bio_free(struct bio *bio)
+{
+	struct bio_set *bs = bio->bi_pool;
+	void *p;
+
+	__bio_free(bio);
+
+	if (bs) {
+		if (bio_has_allocated_vec(bio))
+			bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+
+		/*
+		 * If we have front padding, adjust the bio pointer before freeing
+		 */
+		p = bio;
 		p -= bs->front_pad;
 
-	mempool_free(p, bs->bio_pool);
+		mempool_free(p, bs->bio_pool);
+	} else {
+		/* Bio was allocated by bio_kmalloc() */
+		kfree(bio);
+	}
 }
-EXPORT_SYMBOL(bio_free);
 
 void bio_init(struct bio *bio)
 {
@@ -276,10 +287,7 @@ void bio_reset(struct bio *bio)
 {
 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 
-	if (bio_integrity(bio))
-		bio_integrity_free(bio);
-
-	bio_disassociate_task(bio);
+	__bio_free(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
 	bio->bi_flags = flags|(1 << BIO_UPTODATE);
@@ -362,13 +370,6 @@ struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 }
 EXPORT_SYMBOL(bio_alloc);
 
-static void bio_kmalloc_destructor(struct bio *bio)
-{
-	if (bio_integrity(bio))
-		bio_integrity_free(bio);
-	kfree(bio);
-}
-
 /**
  * bio_kmalloc - allocate a bio for I/O using kmalloc()
  * @gfp_mask: the GFP_ mask given to the slab allocator
@@ -395,7 +396,6 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
 	bio->bi_io_vec = bio->bi_inline_vecs;
-	bio->bi_destructor = bio_kmalloc_destructor;
 
 	return bio;
 }
@@ -431,20 +431,8 @@ void bio_put(struct bio *bio)
 	/*
 	 * last put frees it
 	 */
-	if (atomic_dec_and_test(&bio->bi_cnt)) {
-		bio_disassociate_task(bio);
-		bio->bi_next = NULL;
-
-		/*
-		 * This if statement is temporary - bi_pool is replacing
-		 * bi_destructor, but bi_destructor will be taken out in another
-		 * patch.
-		 */
-		if (bio->bi_pool)
-			bio_free(bio, bio->bi_pool);
-		else
-			bio->bi_destructor(bio);
-	}
+	if (atomic_dec_and_test(&bio->bi_cnt))
+		bio_free(bio);
 }
 EXPORT_SYMBOL(bio_put);
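For drivers that previously installed a custom bi_destructor, the replacement pattern is to allocate from a private bio_set (or use bio_kmalloc()) and let the final bio_put() route the free through bio_free(). A minimal sketch, not taken from this series; the example_bs pool and function names are hypothetical:

	#include <linux/bio.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	static struct bio_set *example_bs;	/* hypothetical per-driver pool */

	static int example_init(void)
	{
		/* 64 bios in the mempool, no front padding */
		example_bs = bioset_create(64, 0);
		return example_bs ? 0 : -ENOMEM;
	}

	static void example_use(void)
	{
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, example_bs);

		if (!bio)
			return;

		/* ... fill in bi_bdev, bi_sector, pages, bi_end_io as usual ... */

		/*
		 * Nothing to tear down by hand: bio->bi_pool records example_bs,
		 * so the last bio_put() frees the bio back into that set.
		 */
		bio_put(bio);
	}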