author     Per Forlin <per.forlin@linaro.org>    2011-07-09 17:12:36 -0400
committer  Chris Ball <cjb@laptop.org>           2011-07-20 17:21:13 -0400
commit     97868a2bdfc2fc79a987b64f1611034b56a3f8c4
tree       e2d76b7706ae6a4d999265e225654b814fa35ad3    /drivers/mmc/card/queue.c
parent     bf043330362b1ccb0c0611b8fc394e06ba8498b0
mmc: block: add member in mmc queue struct to hold request data
The way the request data is organized in the mmc queue struct allows processing of only one request at a time. This patch adds a new struct to hold mmc queue request data such as the sg list, request, blk request and bounce buffers, and updates any functions that depend on the mmc queue struct. This prepares for using multiple active requests in one mmc queue.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
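For orientation, the per-request container this patch introduces is defined in drivers/mmc/card/queue.h, which is changed by the same commit but lies outside this diffstat. The sketch below is only inferred from the member accesses visible in the queue.c hunks (req, sg, bounce_buf, bounce_sg, bounce_sg_len, mqrq[0], mqrq_cur); the authoritative definition is the header hunk not shown here and may contain additional fields.

/*
 * Illustrative sketch only, not the actual queue.h hunk of this commit.
 * Member names are taken from the accesses in the queue.c diff below.
 */
struct mmc_queue_req {
	struct request		*req;		/* block layer request being served */
	struct scatterlist	*sg;		/* sg list handed to the host driver */
	char			*bounce_buf;	/* optional contiguous bounce buffer */
	struct scatterlist	*bounce_sg;	/* sg list mapping the original request */
	unsigned int		bounce_sg_len;	/* number of entries in bounce_sg */
};

struct mmc_queue {
	/* ... existing members (card, thread, queue, ...) ... */
	struct mmc_queue_req	mqrq[1];	/* per-request data; one slot used so far */
	struct mmc_queue_req	*mqrq_cur;	/* request currently being processed */
};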
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--   drivers/mmc/card/queue.c   129
1 file changed, 67 insertions(+), 62 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index defc11b4572c..9122ff5f39c8 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -56,7 +56,7 @@ static int mmc_queue_thread(void *d)
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
-		mq->req = req;
+		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);

 		if (!req) {
@@ -97,10 +97,25 @@ static void mmc_request(struct request_queue *q)
 		return;
 	}

-	if (!mq->req)
+	if (!mq->mqrq_cur->req)
 		wake_up_process(mq->thread);
 }

+struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
+
 static void mmc_queue_setup_discard(struct request_queue *q,
 				    struct mmc_card *card)
 {
@@ -137,6 +152,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];

 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
@@ -146,8 +162,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;

+	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+	mq->mqrq_cur = mqrq_cur;
 	mq->queue->queuedata = mq;
-	mq->req = NULL;

 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -168,53 +185,44 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		bouncesz = host->max_blk_count * 512;

 		if (bouncesz > 512) {
-			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-			if (!mq->bounce_buf) {
+			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_cur->bounce_buf) {
 				printk(KERN_WARNING "%s: unable to "
-					"allocate bounce buffer\n",
+					"allocate bounce cur buffer\n",
 					mmc_card_name(card));
 			}
 		}

-		if (mq->bounce_buf) {
+		if (mqrq_cur->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);

-			mq->sg = kmalloc(sizeof(struct scatterlist),
-				GFP_KERNEL);
-			if (!mq->sg) {
-				ret = -ENOMEM;
+			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->sg, 1);

-			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
-				bouncesz / 512, GFP_KERNEL);
-			if (!mq->bounce_sg) {
-				ret = -ENOMEM;
+			mqrq_cur->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->bounce_sg, bouncesz / 512);
+
 		}
 	}
 #endif

-	if (!mq->bounce_buf) {
+	if (!mqrq_cur->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

-		mq->sg = kmalloc(sizeof(struct scatterlist) *
-			host->max_segs, GFP_KERNEL);
-		if (!mq->sg) {
-			ret = -ENOMEM;
+		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
 			goto cleanup_queue;
-		}
-		sg_init_table(mq->sg, host->max_segs);
+
 	}

 	sema_init(&mq->thread_sem, 1);
@@ -229,16 +237,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,

 	return 0;
  free_bounce_sg:
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+
  cleanup_queue:
-	if (mq->sg)
-		kfree(mq->sg);
-	mq->sg = NULL;
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -247,6 +254,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;

 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -260,16 +268,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);

-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;

-	kfree(mq->sg);
-	mq->sg = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;

-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;

 	mq->card = NULL;
 }
@@ -322,27 +328,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
 	int i;

-	if (!mq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

-	BUG_ON(!mq->bounce_sg);
+	BUG_ON(!mqrq->bounce_sg);

-	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

-	mq->bounce_sg_len = sg_len;
+	mqrq->bounce_sg_len = sg_len;

 	buflen = 0;
-	for_each_sg(mq->bounce_sg, sg, sg_len, i)
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
 		buflen += sg->length;

-	sg_init_one(mq->sg, mq->bounce_buf, buflen);
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

 	return 1;
 }
@@ -351,31 +357,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
  * If writing, bounce the data to the buffer before the request
  * is sent to the host driver
  */
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;

-	if (rq_data_dir(mq->req) != WRITE)
+	if (rq_data_dir(mqrq->req) != WRITE)
 		return;

-	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }

 /*
  * If reading, bounce the data from the buffer after the request
  * has been handled by the host driver
  */
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;

-	if (rq_data_dir(mq->req) != READ)
+	if (rq_data_dir(mqrq->req) != READ)
 		return;

-	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
-