Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/cciss.c              |  3
-rw-r--r--  drivers/block/drbd/Kconfig         |  2
-rw-r--r--  drivers/block/drbd/drbd_int.h      |  7
-rw-r--r--  drivers/block/drbd/drbd_main.c     |  3
-rw-r--r--  drivers/block/drbd/drbd_nl.c       | 19
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 48
-rw-r--r--  drivers/block/pktcdvd.c            |  2
-rw-r--r--  drivers/block/viodasd.c            | 86
-rw-r--r--  drivers/block/virtio_blk.c         | 61
9 files changed, 135 insertions, 96 deletions
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594..9291614 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
 	if (*pos > h->highest_lun)
 		return 0;
 
+	if (drv == NULL)	/* it's possible for h->drv[] to have holes. */
+		return 0;
+
 	if (drv->heads == 0)
 		return 0;
 
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index f4acd04..df09837 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -3,7 +3,7 @@
 #
 
 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
-	depends on !PROC_FS || !INET || !CONNECTOR
+	depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
 
 config BLK_DEV_DRBD
 	tristate "DRBD Distributed Replicated Block Device support"
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c975587..2bf3a6e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1275,7 +1275,7 @@ struct bm_extent {
 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
 #else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 extern void drbd_suspend_io(struct drbd_conf *mdev);
 extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
-		struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9348f33..ab871e0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1298,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			dev_err(DEV, "Sending state in drbd_io_error() failed\n");
 		}
 
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 		lc_destroy(mdev->resync);
 		mdev->resync = NULL;
 		lc_destroy(mdev->act_log);
@@ -2972,7 +2973,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 		goto out_no_q;
 	mdev->rq_queue = q;
 	q->queuedata   = mdev;
-	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 
 	disk = alloc_disk(1);
 	if (!disk)
@@ -2996,6 +2996,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	q->backing_dev_info.congested_data = mdev;
 
 	blk_queue_make_request(q, drbd_make_request_26);
+	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
 	q->queue_lock = &mdev->req_lock; /* needed since we use */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4e0726a..1292e06 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_ho
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-	size = drbd_new_dev_size(mdev, mdev->ldev);
+	size = drbd_new_dev_size(mdev, mdev->ldev, force);
 
 	if (drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ out:
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
 {
 	sector_t p_size = mdev->p_size;  /* partner's disk size. */
 	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
 	m_size = drbd_get_max_capacity(bdev);
 
+	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+		p_size = m_size;
+	}
+
 	if (p_size && m_size) {
 		size = min_t(sector_t, p_size, m_size);
 	} else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
 	/* Prevent shrinking of consistent devices ! */
 	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-	   drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+	   drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
 		dev_warn(DEV, "refusing to truncate a consistent device\n");
 		retcode = ERR_DISK_TO_SMALL;
 		goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 		set_bit(USE_DEGR_WFC_T, &mdev->flags);
 
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			goto fail;
 		}
 
-		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
 			retcode = ERR_AUTH_ALG_ND;
 			goto fail;
 		}
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, rs.resize_force);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
 	if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 259c135..d065c64 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -878,9 +878,13 @@ retry:
 
 	if (mdev->cram_hmac_tfm) {
 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
-		if (!drbd_do_auth(mdev)) {
+		switch (drbd_do_auth(mdev)) {
+		case -1:
 			dev_err(DEV, "Authentication of peer failed\n");
 			return -1;
+		case 0:
+			dev_err(DEV, "Authentication of peer failed, trying again.\n");
+			return 0;
 		}
 	}
 
@@ -1201,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
 	case WO_bdev_flush:
 	case WO_drain_io:
-		D_ASSERT(rv == FE_STILL_LIVE);
-		set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		if (rv == FE_STILL_LIVE) {
+			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		}
 
 		if (rv == FE_RECYCLED)
 			return TRUE;
 
@@ -1219,7 +1224,7 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
 	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
 	if (!epoch) {
 		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
+		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 		if (issue_flush) {
 			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
@@ -2865,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 
 	/* Never shrink a device with usable data during connect.
 	   But allow online shrinking if we are connected. */
-	if (drbd_new_dev_size(mdev, mdev->ldev) <
+	if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
 	   drbd_get_capacity(mdev->this_bdev) &&
 	   mdev->state.disk >= D_OUTDATED &&
 	   mdev->state.conn < C_CONNECTED) {
@@ -2880,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 #undef min_not_zero
 
 	if (get_ldev(mdev)) {
-		dd = drbd_determin_dev_size(mdev);
+		dd = drbd_determin_dev_size(mdev, 0);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
 			return FALSE;
@@ -3830,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
-	return 0;
+	return -1;
 }
 #else
 #define CHALLENGE_LEN 64
+
+/* Return value:
+	1 - auth succeeded,
+	0 - failed, try again (network error),
+	-1 - auth failed, don't try again.
+*/
+
 static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
@@ -3854,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 				(u8 *)mdev->net_conf->shared_secret, key_len);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3877,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 
 	if (p.length > CHALLENGE_LEN*2) {
 		dev_err(DEV, "expected AuthChallenge payload too big.\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
 	peers_ch = kmalloc(p.length, GFP_NOIO);
 	if (peers_ch == NULL) {
 		dev_err(DEV, "kmalloc of peers_ch failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3900,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	response = kmalloc(resp_size, GFP_NOIO);
 	if (response == NULL) {
 		dev_err(DEV, "kmalloc of response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3910,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3944,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	}
 
 	right_response = kmalloc(resp_size, GFP_NOIO);
-	if (response == NULL) {
+	if (right_response == NULL) {
 		dev_err(DEV, "kmalloc of right_response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3955,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3964,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	if (rv)
 		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
 		     resp_size, mdev->net_conf->cram_hmac_alg);
+	else
+		rv = -1;
 
 fail:
 	kfree(peers_ch);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2ddf03a..68b5957 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
 	pkt_kobj_remove(pd->kobj_stat);
 	pkt_kobj_remove(pd->kobj_wqueue);
 	if (class_pktcdvd)
-		device_destroy(class_pktcdvd, pd->pkt_dev);
+		device_unregister(pd->dev);
 }
 
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index a8c8b56..1b3def1 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -28,6 +28,9 @@
  * All disk operations are performed by sending messages back and forth to
  * the OS/400 partition.
  */
+
+#define pr_fmt(fmt) "viod: " fmt
+
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -63,9 +66,6 @@ MODULE_LICENSE("GPL");
 
 #define VIOD_VERS "1.64"
 
-#define VIOD_KERN_WARNING	KERN_WARNING "viod: "
-#define VIOD_KERN_INFO		KERN_INFO "viod: "
-
 enum {
 	PARTITION_SHIFT = 3,
 	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
@@ -156,7 +156,7 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
 			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
 			0, 0, 0);
 	if (hvrc != 0) {
-		printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc);
+		pr_warning("HV open failed %d\n", (int)hvrc);
 		return -EIO;
 	}
 
@@ -167,9 +167,8 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
 		const struct vio_error_entry *err =
 			vio_lookup_rc(viodasd_err_table, we.sub_result);
 
-		printk(VIOD_KERN_WARNING
-		       "bad rc opening disk: %d:0x%04x (%s)\n",
-		       (int)we.rc, we.sub_result, err->msg);
+		pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
+			   (int)we.rc, we.sub_result, err->msg);
 		return -EIO;
 	}
 
@@ -195,8 +194,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
 			((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
 			0, 0, 0);
 	if (hvrc != 0)
-		printk(VIOD_KERN_WARNING "HV close call failed %d\n",
-				(int)hvrc);
+		pr_warning("HV close call failed %d\n", (int)hvrc);
 	return 0;
 }
 
@@ -288,8 +286,7 @@ static int send_request(struct request *req)
 		bevent = (struct vioblocklpevent *)
 			vio_get_event_buffer(viomajorsubtype_blockio);
 		if (bevent == NULL) {
-			printk(VIOD_KERN_WARNING
-			       "error allocating disk event buffer\n");
+			pr_warning("error allocating disk event buffer\n");
 			goto error_ret;
 		}
 
@@ -333,9 +330,8 @@ static int send_request(struct request *req)
 	}
 
 	if (hvrc != HvLpEvent_Rc_Good) {
-		printk(VIOD_KERN_WARNING
-		       "error sending disk event to OS/400 (rc %d)\n",
-		       (int)hvrc);
+		pr_warning("error sending disk event to OS/400 (rc %d)\n",
+			   (int)hvrc);
 		goto error_ret;
 	}
 	spin_unlock_irqrestore(&viodasd_spinlock, flags);
@@ -402,7 +398,7 @@ retry:
 			((u64)dev_no << 48) | ((u64)flags<< 32),
 			0, 0, 0);
 	if (hvrc != 0) {
-		printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc);
+		pr_warning("bad rc on HV open %d\n", (int)hvrc);
 		return 0;
 	}
 
@@ -416,9 +412,8 @@ retry:
 		goto retry;
 	}
 	if (we.max_disk > (MAX_DISKNO - 1)) {
-		printk_once(VIOD_KERN_INFO
-			"Only examining the first %d of %d disks connected\n",
-			MAX_DISKNO, we.max_disk + 1);
+		printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
+			    MAX_DISKNO, we.max_disk + 1);
 	}
 
 	/* Send the close event to OS/400.  We DON'T expect a response */
@@ -432,17 +427,15 @@ retry:
 			((u64)dev_no << 48) | ((u64)flags << 32),
 			0, 0, 0);
 	if (hvrc != 0) {
-		printk(VIOD_KERN_WARNING
-		       "bad rc sending event to OS/400 %d\n", (int)hvrc);
+		pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
 		return 0;
 	}
 
 	if (d->dev == NULL) {
 		/* this is when we reprobe for new disks */
 		if (vio_create_viodasd(dev_no) == NULL) {
-			printk(VIOD_KERN_WARNING
-				"cannot allocate virtual device for disk %d\n",
-				dev_no);
+			pr_warning("cannot allocate virtual device for disk %d\n",
+				   dev_no);
 			return 0;
 		}
 		/*
@@ -457,15 +450,13 @@ retry:
 	spin_lock_init(&d->q_lock);
 	q = blk_init_queue(do_viodasd_request, &d->q_lock);
 	if (q == NULL) {
-		printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n",
-				dev_no);
+		pr_warning("cannot allocate queue for disk %d\n", dev_no);
 		return 0;
 	}
 	g = alloc_disk(1 << PARTITION_SHIFT);
 	if (g == NULL) {
-		printk(VIOD_KERN_WARNING
-				"cannot allocate disk structure for disk %d\n",
-				dev_no);
+		pr_warning("cannot allocate disk structure for disk %d\n",
+			   dev_no);
 		blk_cleanup_queue(q);
 		return 0;
 	}
@@ -489,13 +480,12 @@ retry:
 	g->driverfs_dev = d->dev;
 	set_capacity(g, d->size >> 9);
 
-	printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) "
-			"CHS=%d/%d/%d sector size %d%s\n",
-			dev_no, (unsigned long)(d->size >> 9),
-			(unsigned long)(d->size >> 20),
-			(int)d->cylinders, (int)d->tracks,
-			(int)d->sectors, (int)d->bytes_per_sector,
-			d->read_only ? " (RO)" : "");
+	pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
+		dev_no, (unsigned long)(d->size >> 9),
+		(unsigned long)(d->size >> 20),
+		(int)d->cylinders, (int)d->tracks,
+		(int)d->sectors, (int)d->bytes_per_sector,
+		d->read_only ? " (RO)" : "");
 
 	/* register us in the global list */
 	add_disk(g);
@@ -580,8 +570,8 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
 	if (error) {
 		const struct vio_error_entry *err;
 		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
-		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
-				event->xRc, bevent->sub_result, err->msg);
+		pr_warning("read/write error %d:0x%04x (%s)\n",
+			   event->xRc, bevent->sub_result, err->msg);
 		num_sect = blk_rq_sectors(req);
 	}
 	qlock = req->q->queue_lock;
@@ -606,8 +596,7 @@ static void handle_block_event(struct HvLpEvent *event)
 		return;
 	/* First, we should NEVER get an int here...only acks */
 	if (hvlpevent_is_int(event)) {
-		printk(VIOD_KERN_WARNING
-		       "Yikes! got an int in viodasd event handler!\n");
+		pr_warning("Yikes! got an int in viodasd event handler!\n");
 		if (hvlpevent_need_ack(event)) {
 			event->xRc = HvLpEvent_Rc_InvalidSubtype;
 			HvCallEvent_ackLpEvent(event);
@@ -650,7 +639,7 @@ static void handle_block_event(struct HvLpEvent *event)
 		break;
 
 	default:
-		printk(VIOD_KERN_WARNING "invalid subtype!");
+		pr_warning("invalid subtype!");
 		if (hvlpevent_need_ack(event)) {
 			event->xRc = HvLpEvent_Rc_InvalidSubtype;
 			HvCallEvent_ackLpEvent(event);
@@ -739,29 +728,26 @@ static int __init viodasd_init(void)
 		vio_set_hostlp();
 
 	if (viopath_hostLp == HvLpIndexInvalid) {
-		printk(VIOD_KERN_WARNING "invalid hosting partition\n");
+		pr_warning("invalid hosting partition\n");
 		rc = -EIO;
 		goto early_fail;
 	}
 
-	printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
-			viopath_hostLp);
+	pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);
 
 	/* register the block device */
 	rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
 	if (rc) {
-		printk(VIOD_KERN_WARNING
-				"Unable to get major number %d for %s\n",
-				VIODASD_MAJOR, VIOD_GENHD_NAME);
+		pr_warning("Unable to get major number %d for %s\n",
+			   VIODASD_MAJOR, VIOD_GENHD_NAME);
 		goto early_fail;
 	}
 	/* Actually open the path to the hosting partition */
 	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
 			  VIOMAXREQ + 2);
 	if (rc) {
-		printk(VIOD_KERN_WARNING
-		       "error opening path to host partition %d\n",
-		       viopath_hostLp);
+		pr_warning("error opening path to host partition %d\n",
+			   viopath_hostLp);
 		goto unregister_blk;
 	}
 
@@ -770,7 +756,7 @@ static int __init viodasd_init(void)
 
 	rc = vio_register_driver(&viodasd_driver);
 	if (rc) {
-		printk(VIOD_KERN_WARNING "vio_register_driver failed\n");
+		pr_warning("vio_register_driver failed\n");
 		goto unset_handler;
 	}
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 51042f0ba7..7eff828 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -243,10 +243,12 @@ static int index_to_minor(int index)
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
+	struct request_queue *q;
 	int err;
 	u64 cap;
-	u32 v;
-	u32 blk_size, sg_elems;
+	u32 v, blk_size, sg_elems, opt_io_size;
+	u16 min_io_size;
+	u8 physical_block_exp, alignment_offset;
 
 	if (index_to_minor(index) >= 1 << MINORBITS)
 		return -ENOSPC;
@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_mempool;
 	}
 
-	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
-	if (!vblk->disk->queue) {
+	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
-	vblk->disk->queue->queuedata = vblk;
+	q->queuedata = vblk;
 
 	if (index < 26) {
 		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 
 	/* If barriers are supported, tell block layer that queue is ordered */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
 	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
 
 	/* If disk is read-only in the host, the guest should obey */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	set_capacity(vblk->disk, cap);
 
 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
 
 	/* No need to bounce any requests */
-	blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 
 	/* No real sector limit. */
-	blk_queue_max_sectors(vblk->disk->queue, -1U);
+	blk_queue_max_sectors(q, -1U);
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 				offsetof(struct virtio_blk_config, size_max),
 				&v);
 	if (!err)
-		blk_queue_max_segment_size(vblk->disk->queue, v);
+		blk_queue_max_segment_size(q, v);
 	else
-		blk_queue_max_segment_size(vblk->disk->queue, -1U);
+		blk_queue_max_segment_size(q, -1U);
 
 	/* Host can optionally specify the block size of the device */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
 				offsetof(struct virtio_blk_config, blk_size),
 				&blk_size);
 	if (!err)
-		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(q, blk_size);
+	else
+		blk_size = queue_logical_block_size(q);
+
+	/* Use topology information if available */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, physical_block_exp),
+			&physical_block_exp);
+	if (!err && physical_block_exp)
+		blk_queue_physical_block_size(q,
+				blk_size * (1 << physical_block_exp));
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, alignment_offset),
+			&alignment_offset);
+	if (!err && alignment_offset)
+		blk_queue_alignment_offset(q, blk_size * alignment_offset);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, min_io_size),
+			&min_io_size);
+	if (!err && min_io_size)
+		blk_queue_io_min(q, blk_size * min_io_size);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, opt_io_size),
+			&opt_io_size);
+	if (!err && opt_io_size)
+		blk_queue_io_opt(q, blk_size * opt_io_size);
+
 	add_disk(vblk->disk);
 	return 0;
@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
 };
 
 /*