diff options
author | James Smart <james.smart@emulex.com> | 2010-08-31 22:27:31 -0400 |
---|---|---|
committer | James Bottomley <James.Bottomley@suse.de> | 2010-09-09 15:37:40 -0500 |
commit | 78d16341facf829a71b6f7c68ec5511b9c168060 (patch) | |
tree | ea63d438409b6b955f8514268bc368bf5a99c4d3 /drivers/scsi/scsi_transport_fc.c | |
parent | 91978465b1e5f89025cd43cd2102943160ec6dee (diff) | |
download | kernel_samsung_espresso10-78d16341facf829a71b6f7c68ec5511b9c168060.zip kernel_samsung_espresso10-78d16341facf829a71b6f7c68ec5511b9c168060.tar.gz kernel_samsung_espresso10-78d16341facf829a71b6f7c68ec5511b9c168060.tar.bz2 |
[SCSI] scsi_transport_fc: fix blocked bsg request when fc object deleted
When an rport is "blocked" and a bsg request is received, the bsg request gets
placed on the queue, but the queue stalls. If the fc object is then deleted, the
bsg queue never restarts; it keeps its reference on the object and blocks the
overall teardown.
This patch restarts the bsg queue on teardown and drains any pending requests,
allowing the teardown to succeed.
Signed-off-by: Carl Lajeunesse <carl.lajeunesse@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/scsi_transport_fc.c')
-rw-r--r-- | drivers/scsi/scsi_transport_fc.c | 43 |
1 files changed, 43 insertions, 0 deletions
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 9f0f7d9..78486d5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -4048,11 +4048,54 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
/**
 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
 * @q: the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue, empty any requests that are blocked
 *   on it, ending each with -ENXIO so waiters are released and the queue
 *   can actually be cleaned up.
 */
static void
fc_bsg_remove(struct request_queue *q)
{
	struct request *req; /* block request */
	int counts; /* totals for request_list count and starved */

	if (q) {
		/* Stop taking in new requests */
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);

		/* drain all requests in the queue */
		while (1) {
			/* need the lock to fetch a request
			 * this may fetch the same request as the previous pass
			 */
			req = blk_fetch_request(q);
			/* snapshot, under the lock, how many requests are
			 * still in use or starved on this queue
			 */
			counts = q->rq.count[0] + q->rq.count[1] +
				q->rq.starved[0] + q->rq.starved[1];
			spin_unlock_irq(q->queue_lock);
			/* any requests still outstanding? */
			if (counts == 0)
				break;

			/* This may be the same req as the previous iteration,
			 * always send the blk_end_request_all after a prefetch.
			 * It is not okay to not end the request because the
			 * prefetch started the request.
			 */
			if (req) {
				/* return -ENXIO to indicate that this queue is
				 * going away
				 */
				req->errors = -ENXIO;
				blk_end_request_all(req, -ENXIO);
			}

			msleep(200); /* allow bsg to possibly finish */
			spin_lock_irq(q->queue_lock);
		}

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
	}