author		Pierre Ossman <drzeus@drzeus.cx>	2006-12-23 14:03:02 -0500
committer	Pierre Ossman <drzeus@drzeus.cx>	2007-05-01 07:04:16 -0400
commit		98ac2162699f7e9880683cb954891817f20b607c (patch)
tree		27452d428e16edfe6d13d71f297adf5376d07bde /drivers/mmc/mmc_queue.c
parent		29041dbe199b0dff392bf1b9d634357da0b3208f (diff)
mmc: Move queue functions to mmc_block
The mmc block queue functions are tailored for the mmc_block
driver, so move those functions into that module.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Diffstat (limited to 'drivers/mmc/mmc_queue.c')
-rw-r--r--	drivers/mmc/mmc_queue.c	250
1 file changed, 0 insertions(+), 250 deletions(-)
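The deleted file below implements the queue API that this commit relocates into mmc_block. As a rough sketch of how the consuming block driver uses that API (the mmc_blk_data layout and the mmc_blk_prep_rq/mmc_blk_issue_rq callback names are illustrative assumptions, not code from this commit):

/*
 * Illustrative sketch only: how a block driver might wire itself
 * into the queue API defined in mmc_queue.h. Names marked
 * "hypothetical" are assumptions for illustration.
 */
struct mmc_blk_data {
	struct gendisk *disk;
	struct mmc_queue queue;		/* the queue set up by mmc_init_queue() */
};

static int mmc_blk_setup_queue(struct mmc_blk_data *md, struct mmc_card *card,
			       spinlock_t *lock)
{
	int ret;

	/* Creates the block request queue and spawns the "mmcqd" thread. */
	ret = mmc_init_queue(&md->queue, card, lock);
	if (ret)
		return ret;

	/* The driver supplies the hooks the worker thread calls. */
	md->queue.prep_fn = mmc_blk_prep_rq;	/* hypothetical name */
	md->queue.issue_fn = mmc_blk_issue_rq;	/* hypothetical name */

	return 0;
}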
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
deleted file mode 100644
index c27e42645cdb..000000000000
--- a/drivers/mmc/mmc_queue.c
+++ /dev/null
@@ -1,250 +0,0 @@
/*
 * linux/drivers/mmc/mmc_queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_queue.h"

#define MMC_QUEUE_SUSPENDED (1 << 0)

/*
 * Prepare an MMC request. Essentially, this means handing the
 * preparation off to the media driver, which will create an
 * mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

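	/*
	 * Default is BLKPREP_KILL: unless one of the branches below
	 * accepts the request, the block layer fails it with an error.
	 */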
	if (blk_special_request(req)) {
		/*
		 * Special commands already have their command
		 * blocks set up in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

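	/*
	 * REQ_DONTPREP marks the request as prepared so the block
	 * layer does not run this prep function on it again.
	 */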
	if (ret == BLKPREP_OK)
		req->cmd_flags |= REQ_DONTPREP;

	return ret;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set PF_NOFREEZE so the freezer does not put this thread to
	 * sleep; we handle suspension ourselves. PF_MEMALLOC lets the
	 * thread use emergency memory reserves so writeback through
	 * this thread cannot deadlock waiting on an allocation.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

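	/*
	 * thread_sem serialises this worker with suspend/resume:
	 * mmc_queue_suspend() takes it to wait until the thread is
	 * parked between requests.
	 */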
	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

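	/*
	 * If the worker thread is already busy with a request it will
	 * fetch the next one itself; otherwise wake it up.
	 */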
	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue to
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup_queue;
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_sg;
	}

	return 0;

free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
EXPORT_SYMBOL(mmc_init_queue);

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
EXPORT_SYMBOL(mmc_queue_suspend);

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(mmc_queue_resume);
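
For completeness, a hedged sketch of how a driver would pair these entry points around system suspend; the mmc_blk_suspend/mmc_blk_resume names and the mmc_get_drvdata() accessor are assumptions for illustration, reusing the mmc_blk_data sketch above:

static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);	/* assumed accessor */

	if (md)
		mmc_queue_suspend(&md->queue);	/* stop queue, wait for mmcqd to idle */
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_resume(&md->queue);	/* restart queue, release mmcqd */
	return 0;
}

On driver removal, mmc_cleanup_queue() is the teardown counterpart to mmc_init_queue(): it clears q->queuedata so straggling requests are errored out by mmc_request(), stops the worker thread, and frees the scatterlist and queue.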