From f3eb0aaa0211fd804057070bee1fd067cd65cb13 Mon Sep 17 00:00:00 2001
From: Pierre Ossman
Date: Sat, 16 Aug 2008 21:34:02 +0200
Subject: mmc_block: inform block layer about sector count restriction

Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop
up the request ourselves.

Signed-off-by: Pierre Ossman
---
 drivers/mmc/card/block.c | 23 +----------------------
 drivers/mmc/card/queue.c | 20 ++++++++++++++------
 2 files changed, 15 insertions(+), 28 deletions(-)

(limited to 'drivers/mmc/card')

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ebc8b9d..d73cac8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -215,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
-	struct scatterlist *sg;
+	int ret = 1;
 
 	mmc_claim_host(card->host);
 
@@ -237,8 +236,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.arg = 0;
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
 
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
@@ -270,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 		mmc_queue_bounce_pre(mq);
 
-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
-		 */
-		if (brq.data.blocks !=
-		    (req->nr_sectors >> (md->block_bits - 9))) {
-			data_size = brq.data.blocks * brq.data.blksz;
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
-
 		mmc_wait_for_req(card->host, &brq.mrq);
 
 		mmc_queue_bounce_post(mq);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e..5c8f037 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -142,12 +142,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +182,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
--
cgit v1.1
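
Note: the limit calculation introduced above can be illustrated outside the kernel.
The following is a minimal user-space sketch, not part of the patch; the struct, the
min_u() helper and the cap values (128 KiB max request, 127-block limit) are
hypothetical stand-ins for the struct mmc_host fields (max_req_size, max_seg_size,
max_blk_count) that the patch reads when calling blk_queue_max_sectors().

/*
 * Sketch of how the patch derives the block-layer sector limit and the
 * bounce buffer size from the host capabilities (assumed example values).
 */
#include <stdio.h>

struct fake_host_caps {
	unsigned int max_req_size;   /* bytes per request */
	unsigned int max_seg_size;   /* bytes per segment */
	unsigned int max_blk_count;  /* blocks per request */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical controller: 128 KiB requests but only 127 blocks. */
	struct fake_host_caps host = {
		.max_req_size  = 128 * 1024,
		.max_seg_size  = 64 * 1024,
		.max_blk_count = 127,
	};

	/* Non-bounce path: clamp the sector count by both byte and block limits. */
	unsigned int max_sectors =
		min_u(host.max_blk_count, host.max_req_size / 512);

	/* Bounce path: the buffer may not exceed any of the three caps. */
	unsigned int bouncesz = 64 * 1024;
	bouncesz = min_u(bouncesz, host.max_req_size);
	bouncesz = min_u(bouncesz, host.max_seg_size);
	bouncesz = min_u(bouncesz, host.max_blk_count * 512);

	printf("max_sectors = %u, bouncesz = %u\n", max_sectors, bouncesz);
	return 0;
}

With these example caps the sector limit becomes 127 rather than 256, so the block
layer never builds a request the controller would have forced mmc_block to split.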