Diffstat (limited to 'drivers/mmc/mmc_queue.c')
 -rw-r--r--  drivers/mmc/mmc_queue.c | 238 ++++++++++++++++++++++++++++++++++
 1 file changed, 238 insertions(+), 0 deletions(-)
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
new file mode 100644
index 000000000000..0b9682e9a357
--- /dev/null
+++ b/drivers/mmc/mmc_queue.c
@@ -0,0 +1,238 @@
/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_queue.h"

#define MMC_QUEUE_EXIT		(1 << 0)
#define MMC_QUEUE_SUSPENDED	(1 << 1)

/*
 * Prepare an MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create an mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->flags & REQ_SPECIAL) {
		/*
		 * Special commands already have their command
		 * blocks set up in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

	if (ret == BLKPREP_OK)
		req->flags |= REQ_DONTPREP;

	return ret;
}
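
/*
 * A minimal sketch of what a media driver's prep_fn might look like.
 * mmc_blk_prep_rq() and its body are hypothetical, not part of this
 * patch; the only contract is that prep_fn returns a BLKPREP_* code
 * and leaves any per-request state it needs in req->special.
 */
#if 0	/* illustrative only */
static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
{
	/* Kill requests once the card has gone away. */
	if (!mq->card)
		return BLKPREP_KILL;

	/* Translation happens at issue time; nothing more to set up. */
	return BLKPREP_OK;
}
#endif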

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Set PF_MEMALLOC so allocations made on the I/O path can dip
	 * into emergency reserves, and PF_NOFREEZE so the process
	 * freezer does not put us to sleep: we handle suspension
	 * ourselves via mmc_queue_suspend().
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	daemonize("mmcqd");

	complete(&mq->thread_complete);

	down(&mq->thread_sem);
	add_wait_queue(&mq->thread_wq, &wait);
	do {
		struct request *req = NULL;

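		/*
		 * Go interruptible before looking for a request, so a
		 * wake_up() from mmc_request() between the check below
		 * and schedule() cannot be lost.
		 */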
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			mq->req = req = elv_next_request(q);
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (mq->flags & MMC_QUEUE_EXIT)
				break;
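			/*
			 * Drop thread_sem while we sleep, so that
			 * mmc_queue_suspend() can take it and thereby
			 * know no request is in flight.
			 */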
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	remove_wait_queue(&mq->thread_wq, &wait);
	up(&mq->thread_sem);

	complete_and_exit(&mq->thread_complete, 0);
	return 0;
}

/*
 * Generic MMC request handler.  This is called by the block layer
 * whenever requests become runnable on the queue.  If the queue
 * thread is not already busy with a request, wake it up to pick
 * the new one up.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;

	if (!mq->req)
		wake_up(&mq->thread_wq);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue to
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

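	/*
	 * Bounce highmem pages by default; if the host's device
	 * advertises a DMA mask, raise the bounce limit to match it.
	 */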
	if (host->dev->dma_mask && *host->dev->dma_mask)
		limit = *host->dev->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_sectors);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup;
	}

	init_completion(&mq->thread_complete);
	init_waitqueue_head(&mq->thread_wq);
	init_MUTEX(&mq->thread_sem);

	ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
	if (ret >= 0) {
		wait_for_completion(&mq->thread_complete);
		init_completion(&mq->thread_complete);
		ret = 0;
		goto out;
	}

 cleanup:
	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);
 out:
	return ret;
}
EXPORT_SYMBOL(mmc_init_queue);
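
/*
 * A minimal sketch of how a media driver might wire a queue up; the
 * mmc_blk_* names and struct mmc_blk_data are hypothetical, not part
 * of this patch.  The driver supplies prep_fn/issue_fn and brackets
 * the queue's lifetime with mmc_init_queue()/mmc_cleanup_queue().
 */
#if 0	/* illustrative only */
static int mmc_blk_setup_queue(struct mmc_blk_data *md, struct mmc_card *card)
{
	int ret;

	spin_lock_init(&md->lock);
	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		return ret;

	md->queue.prep_fn = mmc_blk_prep_rq;	/* translate block requests */
	md->queue.issue_fn = mmc_blk_issue_rq;	/* perform the MMC transfer */
	return 0;
}
#endif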

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	mq->flags |= MMC_QUEUE_EXIT;
	wake_up(&mq->thread_wq);
	wait_for_completion(&mq->thread_complete);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

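		/*
		 * Taking thread_sem blocks until the queue thread has
		 * finished the request it is currently issuing and has
		 * gone back to sleep.
		 */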
		down(&mq->thread_sem);
	}
}
EXPORT_SYMBOL(mmc_queue_suspend);

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(mmc_queue_resume);
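
/*
 * A minimal sketch of how a media driver's power-management hooks
 * might use the suspend/resume pair; mmc_blk_suspend(),
 * mmc_blk_resume() and the drvdata lookup are hypothetical, not part
 * of this patch.
 */
#if 0	/* illustrative only */
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	/* Quiesce the queue before the card loses power. */
	mmc_queue_suspend(&md->queue);
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	/* Restart request processing now that the card is back. */
	mmc_queue_resume(&md->queue);
	return 0;
}
#endif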