Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-mpath.c  |  3
-rw-r--r--  drivers/md/dm-raid1.c  |  4
-rw-r--r--  drivers/md/linear.c    |  6
-rw-r--r--  drivers/md/md.c        | 13
-rw-r--r--  drivers/md/raid1.c     | 64
5 files changed, 60 insertions, 30 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 217615b..93f701e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -710,6 +710,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
return -EINVAL;
}
+ m->ti = ti;
+
r = parse_features(&as, m, ti);
if (r)
goto bad;
@@ -751,7 +753,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
}
ti->private = m;
- m->ti = ti;
return 0;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index be48ced..c54de98 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region)
struct region *reg, *nreg;
read_unlock(&rh->hash_lock);
- nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
+ nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+ if (unlikely(!nreg))
+ nreg = kmalloc(sizeof(struct region), GFP_NOIO);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
RH_CLEAN : RH_NOSYNC;
nreg->rh = rh;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index ff83c9b..b99c19c 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -162,7 +162,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
goto out;
}
- min_spacing = mddev->array_size;
+ min_spacing = conf->array_size;
sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
/* min_spacing is the minimum spacing that will fit the hash
@@ -171,7 +171,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* that is larger than min_spacing as use the size of that as
* the actual spacing
*/
- conf->hash_spacing = mddev->array_size;
+ conf->hash_spacing = conf->array_size;
for (i=0; i < cnt-1 ; i++) {
sector_t sz = 0;
int j;
@@ -228,7 +228,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
curr_offset = 0;
i = 0;
for (curr_offset = 0;
- curr_offset < mddev->array_size;
+ curr_offset < conf->array_size;
curr_offset += conf->hash_spacing) {
while (i < mddev->raid_disks-1 &&
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b6d1602..8dbab2e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev)
repeat:
spin_lock_irq(&mddev->write_lock);
+
+ if (mddev->degraded && mddev->sb_dirty == 3)
+ /* If the array is degraded, then skipping spares is both
+ * dangerous and fairly pointless.
+ * Dangerous because a device that was removed from the array
+ * might have a event_count that still looks up-to-date,
+ * so it can be re-added without a resync.
+ * Pointless because if there are any spares to skip,
+ * then a recovery will happen and soon that array won't
+ * be degraded any more and the spare can go back to sleep then.
+ */
+ mddev->sb_dirty = 1;
+
sync_req = mddev->in_sync;
mddev->utime = get_seconds();
if (mddev->sb_dirty == 3)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1efe22a..3b4d69c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -930,10 +930,13 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
conf->working_disks);
- for (i = 0; i < conf->raid_disks; i++)
+ rcu_read_lock();
+ for (i = 0; i < conf->raid_disks; i++) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
seq_printf(seq, "%s",
- conf->mirrors[i].rdev &&
- test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
+ rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ }
+ rcu_read_unlock();
seq_printf(seq, "]");
}
@@ -975,7 +978,6 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
static void print_conf(conf_t *conf)
{
int i;
- mirror_info_t *tmp;
printk("RAID1 conf printout:\n");
if (!conf) {
@@ -985,14 +987,17 @@ static void print_conf(conf_t *conf)
printk(" --- wd:%d rd:%d\n", conf->working_disks,
conf->raid_disks);
+ rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
- tmp = conf->mirrors + i;
- if (tmp->rdev)
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev)
printk(" disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
+ rcu_read_unlock();
}
static void close_sync(conf_t *conf)
@@ -1008,20 +1013,20 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
- mirror_info_t *tmp;
/*
* Find all failed disks within the RAID1 configuration
- * and mark them readable
+ * and mark them readable.
+ * Called under mddev lock, so rcu protection not needed.
*/
for (i = 0; i < conf->raid_disks; i++) {
- tmp = conf->mirrors + i;
- if (tmp->rdev
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_bit(In_sync, &tmp->rdev->flags)) {
+ mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)) {
conf->working_disks++;
mddev->degraded--;
- set_bit(In_sync, &tmp->rdev->flags);
+ set_bit(In_sync, &rdev->flags);
}
}
@@ -1237,7 +1242,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
/* ouch - failed to read all of that.
* Try some synchronous reads of other devices to get
* good data, much like with normal read errors. Only
- * read into the pages we already have so they we don't
+ * read into the pages we already have so we don't
* need to re-issue the read request.
* We don't need to freeze the array, because being in an
* active sync request, there is no normal IO, and
@@ -1257,6 +1262,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
s = PAGE_SIZE >> 9;
do {
if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
+ /* No rcu protection needed here devices
+ * can only be removed when no resync is
+ * active, and resync is currently active
+ */
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev->bdev,
sect + rdev->data_offset,
@@ -1463,6 +1472,11 @@ static void raid1d(mddev_t *mddev)
s = PAGE_SIZE >> 9;
do {
+ /* Note: no rcu protection needed here
+ * as this is synchronous in the raid1d thread
+ * which is the thread that might remove
+ * a device. If raid1d ever becomes multi-threaded....
+ */
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
@@ -1486,7 +1500,6 @@ static void raid1d(mddev_t *mddev)
d = conf->raid_disks;
d--;
rdev = conf->mirrors[d].rdev;
- atomic_add(s, &rdev->corrected_errors);
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
if (sync_page_io(rdev->bdev,
@@ -1509,9 +1522,11 @@ static void raid1d(mddev_t *mddev)
s<<9, conf->tmppage, READ) == 0)
/* Well, this device is dead */
md_error(mddev, rdev);
- else
+ else {
+ atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO "raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s, (unsigned long long)(sect + rdev->data_offset), bdevname(rdev->bdev, b));
+ }
}
}
} else {
@@ -1625,15 +1640,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return 0;
}
- /* before building a request, check if we can skip these blocks..
- * This call the bitmap_start_sync doesn't actually record anything
- */
if (mddev->bitmap == NULL &&
mddev->recovery_cp == MaxSector &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
return max_sector - sector_nr;
}
+ /* before building a request, check if we can skip these blocks..
+ * This call the bitmap_start_sync doesn't actually record anything
+ */
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
@@ -1786,19 +1802,17 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (i=0; i<conf->raid_disks; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
- md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
}
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
- nr_sectors);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
-
return nr_sectors;
}