| author | Jonathan E Brassow <jbrassow@redhat.com> | 2012-07-27 15:08:04 +0100 |
|---|---|---|
| committer | Alasdair G Kergon <agk@redhat.com> | 2012-07-27 15:08:04 +0100 |
| commit | c039c332f23e794deb6d6f37b9f07ff3b27fb2cf (patch) | |
| tree | 2b6bce014f9359e6152c957594aa86f9549f7dab /drivers/md | |
| parent | f999e8fe70bd0b8faa27ccdac14b5942999c6e78 (diff) | |
dm raid: move sectors_per_dev calculation
In preparation for RAID10 inclusion in dm-raid, we move the sectors_per_dev
calculation later in the device creation process. This is because we won't
know up-front how many stripes vs. how many mirrors there are, which changes
the calculation.
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
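
For readers unfamiliar with the check being moved: the per-device size is the target length divided by the number of data devices (total devices minus parity devices), and target lengths that do not divide evenly are rejected. Below is a minimal user-space sketch of that arithmetic, using plain 64-bit division in place of the kernel's sector_div() helper and hypothetical example values for the device counts:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch only: models the calculation that this patch moves from
 * context_alloc() into parse_raid_params().  The numbers are made up;
 * in the driver they come from the target line and the raid_type table. */
int main(void)
{
	uint64_t target_len = 3145728;	/* ti->len in 512-byte sectors (example) */
	unsigned raid_devs = 5;		/* e.g. a 5-device RAID5 set */
	unsigned parity_devs = 1;	/* raid_type->parity_devs for RAID5 */
	unsigned data_devs = raid_devs - parity_devs;

	if (target_len % data_devs) {
		fprintf(stderr, "Target length not divisible by number of data devices\n");
		return 1;
	}

	/* This value becomes rs->md.dev_sectors in the driver. */
	uint64_t sectors_per_dev = target_len / data_devs;
	printf("sectors_per_dev = %llu\n", (unsigned long long)sectors_per_dev);
	return 0;
}
```

With these example numbers, 3145728 sectors spread over 4 data devices gives 786432 sectors per device. For RAID10 the divisor is no longer simply raid_devs - parity_devs, because some devices hold mirror copies rather than extra stripes, which is why the division has to wait until the parameters have been parsed.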
Diffstat (limited to 'drivers/md')
| -rw-r--r-- | drivers/md/dm-raid.c | 18 |
1 file changed, 9 insertions, 9 deletions
```diff
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1717ed3..f4275a8 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -101,20 +101,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 {
 	unsigned i;
 	struct raid_set *rs;
-	sector_t sectors_per_dev;
 
 	if (raid_devs <= raid_type->parity_devs) {
 		ti->error = "Insufficient number of devices";
 		return ERR_PTR(-EINVAL);
 	}
 
-	sectors_per_dev = ti->len;
-	if ((raid_type->level > 1) &&
-	    sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
-		ti->error = "Target length not divisible by number of data devices";
-		return ERR_PTR(-EINVAL);
-	}
-
 	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
 	if (!rs) {
 		ti->error = "Cannot allocate raid context";
@@ -128,7 +120,6 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	rs->md.raid_disks = raid_devs;
 	rs->md.level = raid_type->level;
 	rs->md.new_level = rs->md.level;
-	rs->md.dev_sectors = sectors_per_dev;
 	rs->md.layout = raid_type->algorithm;
 	rs->md.new_layout = rs->md.layout;
 	rs->md.delta_disks = 0;
@@ -143,6 +134,7 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	 *  rs->md.external
 	 *  rs->md.chunk_sectors
 	 *  rs->md.new_chunk_sectors
+	 *  rs->md.dev_sectors
 	 */
 
 	return rs;
@@ -353,6 +345,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
 	unsigned i, rebuild_cnt = 0;
 	unsigned long value, region_size = 0;
+	sector_t sectors_per_dev = rs->ti->len;
 	sector_t max_io_len;
 	char *key;
 
@@ -545,6 +538,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	if (dm_set_target_max_io_len(rs->ti, max_io_len))
 		return -EINVAL;
 
+	if ((rs->raid_type->level > 1) &&
+	    sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) {
+		rs->ti->error = "Target length not divisible by number of data devices";
+		return -EINVAL;
+	}
+	rs->md.dev_sectors = sectors_per_dev;
+
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
 	rs->md.external = 1;
```
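
A note on the helper used in the new hunk: sector_div() divides its sector_t argument in place and returns the remainder, so the single call inside the if condition both performs the division and detects a non-divisible target length. A rough user-space model of that behaviour, purely illustrative (the real kernel macro takes the sector_t lvalue directly rather than a pointer):

```c
#include <stdint.h>

/* Illustrative stand-in for the kernel's sector_div(): divide *sectors by
 * divisor in place and return the remainder.  A non-zero return therefore
 * means the length did not divide evenly across the data devices. */
uint32_t sector_div_model(uint64_t *sectors, uint32_t divisor)
{
	uint32_t remainder = (uint32_t)(*sectors % divisor);

	*sectors /= divisor;
	return remainder;
}
```

This in-place behaviour is why the patch can assign rs->md.dev_sectors = sectors_per_dev right after the check succeeds: by that point sector_div() has already reduced sectors_per_dev to the per-device value.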