Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	252
1 file changed, 252 insertions, 0 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
new file mode 100644
index 000000000000..2e77963db334
--- /dev/null
+++ b/drivers/mmc/card/queue.c
@@ -0,0 +1,252 @@
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create an mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

	if (blk_special_request(req)) {
		/*
		 * Special commands already have their command
		 * blocks set up in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

	if (ret == BLKPREP_OK)
		req->cmd_flags |= REQ_DONTPREP;

	return ret;
}

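/*
 * Illustrative sketch, not part of this file: a media driver's
 * ->prep_fn hook typically just checks that the device is still
 * usable before the request is translated.  The function name and
 * the use of the queue's driver-private data pointer below are
 * hypothetical.
 */
#if 0
static int example_blk_prep_rq(struct mmc_queue *mq, struct request *req)
{
	/* Kill requests once the card or driver data has gone away. */
	if (!mq->data || !mq->card)
		return BLKPREP_KILL;

	return BLKPREP_OK;
}
#endif
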
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

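/*
 * Note on the thread_sem handshake above: the worker holds thread_sem
 * whenever it is actively pulling requests off the queue, and releases
 * it only while sleeping with no work pending.  mmc_queue_suspend()
 * relies on this, down()ing the semaphore to wait for any in-flight
 * request to complete and to park the thread until resume.
 */
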
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue to
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup_queue;
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_sg;
	}

	return 0;

 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

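/*
 * Illustrative usage sketch, not part of this file: a media driver
 * would typically embed an mmc_queue in its per-device data, then hook
 * up its prep/issue handlers once the queue is initialised.  The names
 * md, mmc_blk_prep_rq and mmc_blk_issue_rq are hypothetical.
 */
#if 0
	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		return ret;

	md->queue.prep_fn = mmc_blk_prep_rq;
	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;
#endif
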
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

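/*
 * Illustrative sketch, not part of this file: the suspend/resume pair
 * above is intended to be called from a media driver's power
 * management callbacks, roughly as below.  mmc_blk_suspend(),
 * mmc_blk_resume() and the mmc_get_drvdata() lookup are hypothetical
 * names for such hooks.
 */
#if 0
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_suspend(&md->queue);
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_resume(&md->queue);
	return 0;
}
#endif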