author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/mmc/card/queue.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	37
1 file changed, 13 insertions(+), 24 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e876678176be..6413afa318d2 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!blk_queue_plugged(q))
-			req = blk_fetch_request(q);
+		req = blk_fetch_request(q);
 		mq->req = req;
 		spin_unlock_irq(q->queue_lock);
 
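The dropped blk_queue_plugged() test reflects the block layer's removal of per-queue plugging around this kernel generation; the queue thread now simply asks for the next request, and blk_fetch_request() returns NULL when nothing is pending. A minimal sketch of the fetch step as it reads after this hunk:

	spin_lock_irq(q->queue_lock);
	set_current_state(TASK_INTERRUPTIBLE);
	req = blk_fetch_request(q);	/* NULL when the queue is empty */
	mq->req = req;
	spin_unlock_irq(q->queue_lock);
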
@@ -107,10 +106,12 @@ static void mmc_request(struct request_queue *q)
107 * @mq: mmc queue 106 * @mq: mmc queue
108 * @card: mmc card to attach this queue 107 * @card: mmc card to attach this queue
109 * @lock: queue lock 108 * @lock: queue lock
109 * @subname: partition subname
110 * 110 *
111 * Initialise a MMC card request queue. 111 * Initialise a MMC card request queue.
112 */ 112 */
113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) 113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
114 spinlock_t *lock, const char *subname)
114{ 115{
115 struct mmc_host *host = card->host; 116 struct mmc_host *host = card->host;
116 u64 limit = BLK_BOUNCE_HIGH; 117 u64 limit = BLK_BOUNCE_HIGH;
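The new subname argument lets the block driver create separate queues for a card's hardware partitions and tell their worker threads apart. A hedged sketch of a call site (the md->queue/md->lock fields, the "boot0" partition name, and the error label are only illustrative):

	ret = mmc_init_queue(&md->queue, card, &md->lock, "boot0");
	if (ret)
		goto out;	/* error path name is illustrative */
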
@@ -128,26 +129,20 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
 	if (mmc_can_erase(card)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
 		mq->queue->limits.max_discard_sectors = UINT_MAX;
 		if (card->erased_byte == 0)
 			mq->queue->limits.discard_zeroes_data = 1;
-		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
-			mq->queue->limits.discard_granularity =
-				card->erase_size << 9;
-			mq->queue->limits.discard_alignment =
-				card->erase_size << 9;
-		}
+		mq->queue->limits.discard_granularity = card->pref_erase << 9;
 		if (mmc_can_secure_erase_trim(card))
 			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
 						mq->queue);
 	}
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_hw_segs == 1) {
+	if (host->max_segs == 1) {
 		unsigned int bouncesz;
 
 		bouncesz = MMC_QUEUE_BOUNCESZ;
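Two things change in this hunk besides the dropped blk_queue_ordered() call: the discard granularity is now derived from card->pref_erase rather than erase_size, and host->max_hw_segs becomes host->max_segs (the old hw/phys segment split collapsed into one field). Note the unit conversion: the queue limit is in bytes while pref_erase is a 512-byte sector count, so the shift by 9 multiplies by 512. A small worked example (the sector count is only illustrative):

	/* e.g. pref_erase == 8192 sectors  ->  8192 << 9 == 4 MiB granularity */
	mq->queue->limits.discard_granularity = card->pref_erase << 9;
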
@@ -197,21 +192,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist) *
-			host->max_phys_segs, GFP_KERNEL);
+			host->max_segs, GFP_KERNEL);
 		if (!mq->sg) {
 			ret = -ENOMEM;
 			goto cleanup_queue;
 		}
-		sg_init_table(mq->sg, host->max_phys_segs);
+		sg_init_table(mq->sg, host->max_segs);
 	}
 
-	init_MUTEX(&mq->thread_sem);
+	sema_init(&mq->thread_sem, 1);
+
+	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+		host->index, subname ? subname : "");
 
-	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
 		goto free_bounce_sg;
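Beyond the max_segs renames, this hunk swaps the long-deprecated init_MUTEX() for the equivalent sema_init(&mq->thread_sem, 1) and names the queue thread from the host index plus the partition subname. A rough sketch of the resulting thread names (the index and subname values are only examples):

	/* subname == NULL    ->  "mmcqd/0"       (main data area, host 0) */
	/* subname == "boot0" ->  "mmcqd/0boot0"  (a boot partition)       */
	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
			host->index, subname ? subname : "");
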
@@ -343,18 +340,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
  */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
-	unsigned long flags;
-
 	if (!mq->bounce_buf)
 		return;
 
 	if (rq_data_dir(mq->req) != WRITE)
 		return;
 
-	local_irq_save(flags);
 	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
 		mq->bounce_buf, mq->sg[0].length);
-	local_irq_restore(flags);
 }
 
 /*
@@ -363,17 +356,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq)
  */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
-	unsigned long flags;
-
 	if (!mq->bounce_buf)
 		return;
 
 	if (rq_data_dir(mq->req) != READ)
 		return;
 
-	local_irq_save(flags);
 	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
 		mq->bounce_buf, mq->sg[0].length);
-	local_irq_restore(flags);
 }
 
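The explicit local_irq_save()/local_irq_restore() pairs around the bounce-buffer copies go away in these last two hunks; sg_copy_to_buffer()/sg_copy_from_buffer() map the scatterlist atomically and handle interrupt state themselves (via the SG_MITER_ATOMIC iterator, as far as I can tell), so the caller only passes the scatterlist, its entry count, and the linear buffer. A minimal sketch of the read-completion copy as it reads after the patch (condensed from the two early returns above):

	if (mq->bounce_buf && rq_data_dir(mq->req) == READ)
		sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
				    mq->bounce_buf, mq->sg[0].length);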