author    | Jun'ichi Nomura <j-nomura@ce.jp.nec.com> | 2007-12-13 14:15:25 +0000
committer | Alasdair G Kergon <agk@redhat.com>       | 2007-12-20 17:32:08 +0000
commit    | 512875bd9661368da6f993205a61213b79ba1df0 (patch)
tree      | 7a2e010060b6233cd02e2e36b62f5dcaa96c2c36 /drivers/md/dm.c
parent    | fbdcf18df73758b2e187ab94678b30cd5f6ff9f9 (diff)
dm: table detect io beyond device
This patch fixes a panic on shrinking a DM device if there is
outstanding I/O to the part of the device that is being removed.
(Normally this doesn't happen - a filesystem would be resized first,
for example.)
The bug is that __clone_and_map() assumes dm_table_find_target()
always returns a valid pointer. It may fail if a bio arrives from the
block layer but its target sector is no longer included in the DM
btree.
This patch appends an empty entry to table->targets[] which will
be returned by a lookup beyond the end of the device.
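To make the idea concrete, here is a minimal, self-contained userspace sketch of the sentinel-entry technique. It is not the kernel's dm-table code; `toy_table`, `toy_target`, `alloc_table` and `find_target` are illustrative names. The targets array is allocated with one extra zero-filled slot, and a lookup for a sector beyond the end of the device returns that slot instead of indexing past the real entries.

```c
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long sector_t;

/* A real entry has a non-zero length; the sentinel stays all-zero. */
struct toy_target {
	sector_t begin;
	sector_t len;
};

struct toy_table {
	unsigned int num_targets;
	sector_t size;                  /* total device size in sectors */
	struct toy_target *targets;     /* num_targets entries + 1 sentinel */
};

static struct toy_table *alloc_table(unsigned int num, sector_t size)
{
	struct toy_table *t = calloc(1, sizeof(*t));

	t->num_targets = num;
	t->size = size;
	/* One extra, zero-filled entry acts as the out-of-range sentinel. */
	t->targets = calloc(num + 1, sizeof(*t->targets));
	return t;
}

/* Out-of-range lookups land on the sentinel instead of a bogus entry. */
static struct toy_target *find_target(struct toy_table *t, sector_t sector)
{
	unsigned int i;

	if (sector >= t->size)
		return &t->targets[t->num_targets];     /* the empty entry */

	for (i = 0; i < t->num_targets; i++)
		if (sector < t->targets[i].begin + t->targets[i].len)
			break;
	return &t->targets[i];
}

int main(void)
{
	struct toy_table *t = alloc_table(1, 1024);

	t->targets[0].begin = 0;
	t->targets[0].len = 1024;

	printf("in range:   len=%llu\n", find_target(t, 100)->len);  /* 1024 */
	printf("beyond end: len=%llu\n", find_target(t, 2048)->len); /* 0 (sentinel) */

	free(t->targets);
	free(t);
	return 0;
}
```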
After calling dm_table_find_target(), __clone_and_map() and target_message() check for this condition using dm_target_is_valid().
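As an illustrative sketch rather than the kernel source (the `target_is_valid` macro and the toy types below are stand-ins, assuming the sentinel entry can be told apart from real entries by a zeroed field), the caller-side pattern looks like this: validate the looked-up entry, turn an out-of-range lookup into -EIO, and stop the submission loop on the first error, mirroring the __clone_and_map()/__split_bio() changes in the diff below.

```c
#include <errno.h>
#include <stdio.h>

/* Minimal stand-ins; in the kernel these are struct dm_target etc. */
struct target { int valid; };           /* sentinel entry has valid == 0 */

/* Stand-in for dm_target_is_valid(): real entry or the sentinel? */
#define target_is_valid(t) ((t)->valid)

static struct target targets[] = { { 1 }, { 1 }, { 0 } };   /* last is sentinel */

/* Stand-in for dm_table_find_target(): out-of-range lookups hit the sentinel. */
static struct target *find_target(unsigned long idx)
{
	return idx < 2 ? &targets[idx] : &targets[2];
}

/* Mirrors the __clone_and_map() change: validate before using the entry. */
static int clone_and_map(unsigned long idx)
{
	struct target *ti = find_target(idx);

	if (!target_is_valid(ti))
		return -EIO;
	/* ... map the I/O with ti ... */
	return 0;
}

int main(void)
{
	/* Mirrors the __split_bio() change: stop the loop on the first error. */
	int error = 0;
	unsigned long idx;

	for (idx = 0; idx < 5 && !error; idx++)
		error = clone_and_map(idx);

	printf("finished with error=%d (expect %d)\n", error, -EIO);
	return 0;
}
```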
Sample test script to trigger oops:
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 24
1 file changed, 18 insertions, 6 deletions
```diff
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 07cbbb8..cff2a71 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+	struct dm_target *ti;
+	sector_t len = 0, max;
 	struct dm_target_io *tio;
 
+	ti = dm_table_find_target(ci->map, ci->sector);
+	if (!dm_target_is_valid(ti))
+		return -EIO;
+
+	max = max_io_len(ci->md, ci->sector, ti);
+
 	/*
 	 * Allocate a target io object.
 	 */
@@ -736,6 +742,9 @@ static void __clone_and_map(struct clone_info *ci)
 		do {
 			if (offset) {
 				ti = dm_table_find_target(ci->map, ci->sector);
+				if (!dm_target_is_valid(ti))
+					return -EIO;
+
 				max = max_io_len(ci->md, ci->sector, ti);
 
 				tio = alloc_tio(ci->md);
@@ -759,6 +768,8 @@ static void __clone_and_map(struct clone_info *ci)
 
 		ci->idx++;
 	}
+
+	return 0;
 }
 
 /*
@@ -767,6 +778,7 @@ static void __clone_and_map(struct clone_info *ci)
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;
+	int error = 0;
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.idx = bio->bi_idx;
 
 	start_io_acct(ci.io);
-	while (ci.sector_count)
-		__clone_and_map(&ci);
+	while (ci.sector_count && !error)
+		error = __clone_and_map(&ci);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, 0);
+	dec_pending(ci.io, error);
 	dm_table_put(ci.map);
 
 	return 0;
```