path: root/drivers/mmc/card/queue.c
author    Adrian Hunter <adrian.hunter@intel.com>  2011-06-28 17:16:02 +0300
committer Chris Ball <cjb@laptop.org>              2011-07-20 17:21:03 -0400
commit    e056a1b5b67b4e4bfad00bf143ab14f634777705
tree      c9cd3a6144787bcb434e52a4a32dec3c37e9f343 /drivers/mmc/card/queue.c
parent    e8cd77e467f7bb1d4b942037c47b087334a484d4
mmc: queue: let host controllers specify maximum discard timeout
Some host controllers will not operate without a hardware timeout that is limited in value. However, large discards require large timeouts, so there needs to be a way to specify the maximum discard size.

A host controller driver may now specify the maximum discard timeout possible so that max_discard_sectors can be calculated.

However, for eMMC when the High Capacity Erase Group Size is not in use, the timeout calculation depends on the clock rate, which may change. For that case the Preferred Erase Size is used instead.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
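For context (illustration only, not part of this file's diff): the host-side half of this change is for a controller driver to advertise the largest discard timeout its hardware timer can honour, which the core's mmc_calc_max_discard() then turns into a bound on max_discard_sectors. Below is a minimal sketch of such a driver hook, assuming the max_discard_to field (maximum discard timeout in milliseconds) that this series adds to struct mmc_host; the helper name and the 2^27-cycle hardware limit are illustrative assumptions, not taken from this patch.

/*
 * Illustrative sketch, not from this patch: a host controller whose
 * hardware timeout counter tops out at 2^27 cycles of its timeout clock
 * reports the largest discard timeout it can honour, in milliseconds.
 * The core then uses this value (via mmc_calc_max_discard()) instead of
 * leaving max_discard_sectors at UINT_MAX.
 */
static void example_report_max_discard_timeout(struct mmc_host *mmc,
					       unsigned int timeout_clk_khz)
{
	if (!timeout_clk_khz)
		return;		/* no known hardware timeout limit */

	/* 2^27 timeout-clock cycles, converted to ms (clock in kHz) */
	mmc->max_discard_to = (1 << 27) / timeout_clk_khz;
}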
Diffstat (limited to 'drivers/mmc/card/queue.c')
 drivers/mmc/card/queue.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa..defc11b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -101,6 +101,27 @@ static void mmc_request(struct request_queue *q)
 	wake_up_process(mq->thread);
 }
 
+static void mmc_queue_setup_discard(struct request_queue *q,
+				    struct mmc_card *card)
+{
+	unsigned max_discard;
+
+	max_discard = mmc_calc_max_discard(card);
+	if (!max_discard)
+		return;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	q->limits.max_discard_sectors = max_discard;
+	if (card->erased_byte == 0)
+		q->limits.discard_zeroes_data = 1;
+	q->limits.discard_granularity = card->pref_erase << 9;
+	/* granularity must not be greater than max. discard */
+	if (card->pref_erase > max_discard)
+		q->limits.discard_granularity = 0;
+	if (mmc_can_secure_erase_trim(card))
+		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -130,16 +151,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	if (mmc_can_erase(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
-		mq->queue->limits.max_discard_sectors = UINT_MAX;
-		if (card->erased_byte == 0)
-			mq->queue->limits.discard_zeroes_data = 1;
-		mq->queue->limits.discard_granularity = card->pref_erase << 9;
-		if (mmc_can_secure_erase_trim(card))
-			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
-						mq->queue);
-	}
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_segs == 1) {