about summary refs log tree commit diff stats
path: root/drivers/mmc/card
diff options
context:
space:
mode:
authorAdrian Hunter <adrian.hunter@intel.com>2011-06-28 10:16:02 -0400
committerChris Ball <cjb@laptop.org>2011-07-20 17:21:03 -0400
commite056a1b5b67b4e4bfad00bf143ab14f634777705 (patch)
treec9cd3a6144787bcb434e52a4a32dec3c37e9f343 /drivers/mmc/card
parente8cd77e467f7bb1d4b942037c47b087334a484d4 (diff)
mmc: queue: let host controllers specify maximum discard timeout
Some host controllers will not operate without a hardware timeout that is limited in value. However large discards require large timeouts, so there needs to be a way to specify the maximum discard size. A host controller driver may now specify the maximum discard timeout possible so that max_discard_sectors can be calculated. However, for eMMC when the High Capacity Erase Group Size is not in use, the timeout calculation depends on clock rate which may change. For that case Preferred Erase Size is used instead. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--drivers/mmc/card/queue.c33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa318d2..defc11b4572c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -101,6 +101,27 @@ static void mmc_request(struct request_queue *q)
101 wake_up_process(mq->thread); 101 wake_up_process(mq->thread);
102} 102}
103 103
104static void mmc_queue_setup_discard(struct request_queue *q,
105 struct mmc_card *card)
106{
107 unsigned max_discard;
108
109 max_discard = mmc_calc_max_discard(card);
110 if (!max_discard)
111 return;
112
113 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
114 q->limits.max_discard_sectors = max_discard;
115 if (card->erased_byte == 0)
116 q->limits.discard_zeroes_data = 1;
117 q->limits.discard_granularity = card->pref_erase << 9;
118 /* granularity must not be greater than max. discard */
119 if (card->pref_erase > max_discard)
120 q->limits.discard_granularity = 0;
121 if (mmc_can_secure_erase_trim(card))
122 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
123}
124
104/** 125/**
105 * mmc_init_queue - initialise a queue structure. 126 * mmc_init_queue - initialise a queue structure.
106 * @mq: mmc queue 127 * @mq: mmc queue
@@ -130,16 +151,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
130 151
131 blk_queue_prep_rq(mq->queue, mmc_prep_request); 152 blk_queue_prep_rq(mq->queue, mmc_prep_request);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 153 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
133 if (mmc_can_erase(card)) { 154 if (mmc_can_erase(card))
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); 155 mmc_queue_setup_discard(mq->queue, card);
135 mq->queue->limits.max_discard_sectors = UINT_MAX;
136 if (card->erased_byte == 0)
137 mq->queue->limits.discard_zeroes_data = 1;
138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
139 if (mmc_can_secure_erase_trim(card))
140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
141 mq->queue);
142 }
143 156
144#ifdef CONFIG_MMC_BLOCK_BOUNCE 157#ifdef CONFIG_MMC_BLOCK_BOUNCE
145 if (host->max_segs == 1) { 158 if (host->max_segs == 1) {