Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c             |  2
-rw-r--r--  drivers/md/dm-crypt.c           | 38
-rw-r--r--  drivers/md/dm-exception-store.c |  2
-rw-r--r--  drivers/md/dm-flakey.c          | 11
-rw-r--r--  drivers/md/dm-io.c              | 23
-rw-r--r--  drivers/md/dm-linear.c          | 12
-rw-r--r--  drivers/md/dm-mpath.c           |  6
-rw-r--r--  drivers/md/dm-raid.c            |  1
-rw-r--r--  drivers/md/raid1.c              | 17
-rw-r--r--  drivers/md/raid10.c             | 17
-rw-r--r--  drivers/md/raid5.c              | 36
11 files changed, 119 insertions(+), 46 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 574b09a..703a3fa 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1982,6 +1982,8 @@ location_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
+ if (!rv)
+ rv = bitmap_load(mddev);
if (rv) {
bitmap_destroy(mddev);
mddev->bitmap_info.offset = 0;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c8827ff..6f906bc 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -177,7 +177,6 @@ struct crypt_config {
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
-#define MIN_BIO_PAGES 8
static struct kmem_cache *_crypt_io_pool;
@@ -849,12 +848,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
}
/*
- * if additional pages cannot be allocated without waiting,
- * return a partially allocated bio, the caller will then try
- * to allocate additional bios while submitting this partial bio
+ * If additional pages cannot be allocated without waiting,
+ * return a partially-allocated bio. The caller will then try
+ * to allocate more bios while submitting this partial bio.
*/
- if (i == (MIN_BIO_PAGES - 1))
- gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+ gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
@@ -1047,16 +1045,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
queue_work(cc->io_queue, &io->work);
}
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
- int error, int async)
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
struct bio *clone = io->ctx.bio_out;
struct crypt_config *cc = io->target->private;
- if (unlikely(error < 0)) {
+ if (unlikely(io->error < 0)) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
- io->error = -EIO;
crypt_dec_pending(io);
return;
}
@@ -1107,12 +1103,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
sector += bio_sectors(clone);
crypt_inc_pending(io);
+
r = crypt_convert(cc, &io->ctx);
+ if (r < 0)
+ io->error = -EIO;
+
crypt_finished = atomic_dec_and_test(&io->ctx.pending);
/* Encryption was already finished, submit io now */
if (crypt_finished) {
- kcryptd_crypt_write_io_submit(io, r, 0);
+ kcryptd_crypt_write_io_submit(io, 0);
/*
* If there was an error, do not try next fragments.
@@ -1163,11 +1163,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
-static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
+static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
- if (unlikely(error < 0))
- io->error = -EIO;
-
crypt_dec_pending(io);
}
@@ -1182,9 +1179,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx);
+ if (r < 0)
+ io->error = -EIO;
if (atomic_dec_and_test(&io->ctx.pending))
- kcryptd_crypt_read_done(io, r);
+ kcryptd_crypt_read_done(io);
crypt_dec_pending(io);
}
@@ -1205,15 +1204,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+ if (error < 0)
+ io->error = -EIO;
+
mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
if (!atomic_dec_and_test(&ctx->pending))
return;
if (bio_data_dir(io->base_bio) == READ)
- kcryptd_crypt_read_done(io, error);
+ kcryptd_crypt_read_done(io);
else
- kcryptd_crypt_write_io_submit(io, error, 1);
+ kcryptd_crypt_write_io_submit(io, 1);
}
static void kcryptd_crypt(struct work_struct *work)
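The hunks above move error reporting out of the kcryptd_* argument lists and into the per-request io->error field: each stage latches -EIO on the first failure, and the completion path reads the field exactly once. A minimal sketch of the pattern, with hypothetical names (my_io, my_stage_done and my_complete are illustrative, not dm-crypt's own):

    /* Sketch of the error-latching pattern: stages record failure in the
     * per-io field instead of threading an 'error' argument through. */
    struct my_io {
            atomic_t pending;       /* outstanding sub-requests */
            int error;              /* 0 until the first failure, then -EIO */
    };

    static void my_complete(struct my_io *io);  /* consumes io->error once */

    static void my_stage_done(struct my_io *io, int r)
    {
            if (r < 0)
                    io->error = -EIO;       /* latch; do not pass 'r' along */
            if (atomic_dec_and_test(&io->pending))
                    my_complete(io);
    }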
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 0bdb201..7344534 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -282,7 +282,7 @@ int dm_exception_store_init(void)
return 0;
persistent_fail:
- dm_persistent_snapshot_exit();
+ dm_transient_snapshot_exit();
transient_fail:
return r;
}
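This one-line fix corrects the unwind order: the transient store is registered first, so a failure registering the persistent store must tear down the transient one rather than the store that just failed to initialise. The usual error-ladder shape, sketched with illustrative names (foo/bar stand in for the two store types):

    int init(void)
    {
            int r;

            r = foo_init();                 /* e.g. the transient store */
            if (r)
                    goto foo_fail;
            r = bar_init();                 /* e.g. the persistent store */
            if (r)
                    goto bar_fail;
            return 0;

    bar_fail:
            foo_exit();     /* undo foo only: bar was never initialised */
    foo_fail:
            return r;
    }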
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index ea79062..3e90b80 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -149,8 +149,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
{
struct flakey_c *fc = ti->private;
+ struct dm_dev *dev = fc->dev;
+ int r = 0;
- return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (fc->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
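dm-linear and dm-mpath (below) gain the same guard: an ioctl is forwarded to the underlying device unmodified only when the target maps that device in full from sector 0; otherwise scsi_verify_blk_ioctl() vets the command first. Factored out as a sketch (the helper name is illustrative):

    /* A target may forward ioctls verbatim only if it starts at sector 0
     * and spans the whole device; otherwise a command addressing absolute
     * sectors could reach blocks outside the mapped range. */
    static bool maps_whole_device(struct dm_target *ti, sector_t start,
                                  struct block_device *bdev)
    {
            return !start &&
                   ti->len == (i_size_read(bdev->bd_inode) >> SECTOR_SHIFT);
    }

Note the "r ? : __blkdev_driver_ioctl(...)" return: GCC's two-operand conditional, equivalent to "r ? r : __blkdev_driver_ioctl(...)", so a non-zero verification error short-circuits the passthrough.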
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ad2eba4..ea5dd28 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -296,6 +296,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned offset;
unsigned num_bvecs;
sector_t remaining = where->count;
+ struct request_queue *q = bdev_get_queue(where->bdev);
+ sector_t discard_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
@@ -305,9 +307,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Allocate a suitably-sized bio.
*/
- num_bvecs = dm_sector_div_up(remaining,
- (PAGE_SIZE >> SECTOR_SHIFT));
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
+ if (rw & REQ_DISCARD)
+ num_bvecs = 1;
+ else
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+ dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
@@ -315,10 +320,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
bio->bi_destructor = dm_bio_destructor;
store_io_and_region_in_bio(bio, io, region);
- /*
- * Try and add as many pages as possible.
- */
- while (remaining) {
+ if (rw & REQ_DISCARD) {
+ discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = discard_sectors << SECTOR_SHIFT;
+ remaining -= discard_sectors;
+ } else while (remaining) {
+ /*
+ * Try and add as many pages as possible.
+ */
dp->get_page(dp, &page, &len, &offset);
len = min(len, to_bytes(remaining));
if (!bio_add_page(bio, page, len, offset))
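A discard bio describes a sector range rather than a data payload, which is why the hunk above reserves a single bvec and sets bi_size directly instead of looping over bio_add_page(). The sizing step in isolation (names as in the hunk above):

    /* One discard bio covers at most max_discard_sectors; the enclosing
     * do_region() loop keeps issuing bios until 'remaining' reaches zero. */
    discard_sectors = min_t(sector_t, q->limits.max_discard_sectors,
                            remaining);
    bio->bi_size = discard_sectors << SECTOR_SHIFT; /* no pages attached */
    remaining -= discard_sectors;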
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 3921e3b..9728839 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct linear_c *lc = (struct linear_c *) ti->private;
- return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
+ struct dm_dev *dev = lc->dev;
+ int r = 0;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (lc->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 209991b..70373bf 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1584,6 +1584,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
spin_unlock_irqrestore(&m->lock, flags);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e5d8904..437ae18 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -468,6 +468,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->split_io = rs->md.chunk_sectors;
ti->private = rs;
+ ti->num_flush_requests = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3a9e59f..36f1ed3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -614,9 +614,22 @@ static void wait_barrier(conf_t *conf)
spin_lock_irq(&conf->resync_lock);
if (conf->barrier) {
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+ /* Wait for the barrier to drop.
+ * However if there are already pending
+ * requests (preventing the barrier from
+ * rising completely), and the
+ * pre-process bio queue isn't empty,
+ * then don't wait, as we need to empty
+ * that queue to get the nr_pending
+ * count down.
+ */
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->barrier ||
+ (conf->nr_pending &&
+ current->bio_list &&
+ !bio_list_empty(current->bio_list)),
conf->resync_lock,
- );
+ );
conf->nr_waiting--;
}
conf->nr_pending++;
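raid10 below receives the identical change. The extra condition prevents a deadlock: a thread inside generic_make_request() may still hold queued bios on current->bio_list, and those bios keep nr_pending raised, so the barrier could never rise while the thread slept. Written as a predicate, the new wake-up condition reads:

    /* Proceed once the barrier drops, OR when requests are already pending
     * and this thread still has bios queued on current->bio_list (i.e. it
     * is inside generic_make_request() and must drain that list before
     * nr_pending can fall). */
    static bool barrier_wait_done(conf_t *conf)
    {
            return !conf->barrier ||
                   (conf->nr_pending &&
                    current->bio_list &&
                    !bio_list_empty(current->bio_list));
    }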
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 17cb6ab..0d6c42f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -667,9 +667,22 @@ static void wait_barrier(conf_t *conf)
spin_lock_irq(&conf->resync_lock);
if (conf->barrier) {
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+ /* Wait for the barrier to drop.
+ * However if there are already pending
+ * requests (preventing the barrier from
+ * rising completely), and the
+ * pre-process bio queue isn't empty,
+ * then don't wait, as we need to empty
+ * that queue to get the nr_pending
+ * count down.
+ */
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->barrier ||
+ (conf->nr_pending &&
+ current->bio_list &&
+ !bio_list_empty(current->bio_list)),
conf->resync_lock,
- );
+ );
conf->nr_waiting--;
}
conf->nr_pending++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2581ba1..1f6c68d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3078,7 +3078,7 @@ static void handle_stripe5(struct stripe_head *sh)
/* Not in-sync */;
else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else {
+ else if (!test_bit(Faulty, &rdev->flags)) {
/* could be in-sync depending on recovery/reshape status */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
@@ -3120,12 +3120,16 @@ static void handle_stripe5(struct stripe_head *sh)
/* check if the array has lost two devices and, if so, some requests might
* need to be failed
*/
- if (s.failed > 1 && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &return_bi);
- if (s.failed > 1 && s.syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- s.syncing = 0;
+ if (s.failed > 1) {
+ sh->check_state = 0;
+ sh->reconstruct_state = 0;
+ if (s.to_read+s.to_write+s.written)
+ handle_failed_stripe(conf, sh, &s, disks, &return_bi);
+ if (s.syncing) {
+ md_done_sync(conf->mddev, STRIPE_SECTORS,0);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+ s.syncing = 0;
+ }
}
/* might be able to return some write requests if the parity block
@@ -3369,7 +3373,7 @@ static void handle_stripe6(struct stripe_head *sh)
/* Not in-sync */;
else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else {
+ else if (!test_bit(Faulty, &rdev->flags)) {
/* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
@@ -3412,12 +3416,16 @@ static void handle_stripe6(struct stripe_head *sh)
/* check if the array has lost >2 devices and, if so, some requests
* might need to be failed
*/
- if (s.failed > 2 && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &return_bi);
- if (s.failed > 2 && s.syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- s.syncing = 0;
+ if (s.failed > 2) {
+ sh->check_state = 0;
+ sh->reconstruct_state = 0;
+ if (s.to_read+s.to_write+s.written)
+ handle_failed_stripe(conf, sh, &s, disks, &return_bi);
+ if (s.syncing) {
+ md_done_sync(conf->mddev, STRIPE_SECTORS,0);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+ s.syncing = 0;
+ }
}
/*
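The raid5 and raid6 hunks above apply one rule with the parity count as the threshold: once more devices have failed than parity can cover, cancel any in-flight check or reconstruct state before failing the pending requests, so a stale state machine cannot act on the dead stripe. Schematically (max_degraded is illustrative shorthand: 1 for raid5, 2 for raid6):

    if (s.failed > max_degraded) {
            /* Parity can no longer recover this stripe: abort any
             * check/reconstruct in progress, then fail the I/O and
             * any sync as in the hunks above. */
            sh->check_state = 0;
            sh->reconstruct_state = 0;
    }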