author	Rusty Russell <rusty@rustcorp.com.au>	2007-07-19 04:49:29 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:53 -0400
commit	b754416bfe9adac6468e45fba244d77f52048aeb (patch)
tree	0d57784c3864fafa811ab87e0d968eabb0502e37 /drivers/block/lguest_blk.c
parent	d503e2fa5aecef99675c5a81b61321a5407bf61f (diff)
lguest: the block driver
Lguest block driver
A simple block driver for lguest.
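
For review purposes, a sketch of the shared block page this driver maps: the field set is inferred from the accesses in lguest_blk.c below, while the exact types and ordering are assumptions (the authoritative definition lives in the lguest headers, not in this patch).

	/* Sketch only: fields inferred from how lguest_blk.c uses bd->lb_page.
	 * Widths and ordering are assumptions; see the lguest headers for the
	 * real struct lguest_block_page. */
	struct lguest_block_page {
		int type;		/* 0 = read, 1 = write (set by the guest) */
		u32 sector;		/* starting sector of the request */
		u32 bytes;		/* total length of the DMA buffers */
		int result;		/* 0 = pending, 1 = success (set by the host) */
		u32 num_sectors;	/* disk capacity, filled in by the host */
	};

The guest fills in type, sector and bytes via setup_req() and kicks the host with lguest_send_dma(); the host sets result and raises the interrupt handled by lgb_irq().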
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/block/lguest_blk.c')
-rw-r--r--	drivers/block/lguest_blk.c	275
1 file changed, 275 insertions, 0 deletions
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
new file mode 100644
index 000000000000..1634c2dd25ec
--- /dev/null
+++ b/drivers/block/lguest_blk.c
@@ -0,0 +1,275 @@
/* A simple block driver for lguest.
 *
 * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/lguest_bus.h>

static char next_block_index = 'a';

struct blockdev
{
	spinlock_t lock;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* The major number for this disk. */
	int major;
	int irq;

	unsigned long phys_addr;
	/* The mapped block page. */
	struct lguest_block_page *lb_page;

	/* We only have a single request outstanding at a time. */
	struct lguest_dma dma;
	struct request *req;
};

/* Jens gave me this nice helper to end all chunks of a request. */
static void end_entire_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, uptodate);
}

static irqreturn_t lgb_irq(int irq, void *_bd)
{
	struct blockdev *bd = _bd;
	unsigned long flags;

	if (!bd->req) {
		pr_debug("No work!\n");
		return IRQ_NONE;
	}

	if (!bd->lb_page->result) {
		pr_debug("No result!\n");
		return IRQ_NONE;
	}

	spin_lock_irqsave(&bd->lock, flags);
	end_entire_request(bd->req, bd->lb_page->result == 1);
	bd->req = NULL;
	bd->dma.used_len = 0;
	blk_start_queue(bd->disk->queue);
	spin_unlock_irqrestore(&bd->lock, flags);
	return IRQ_HANDLED;
}

static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
{
	unsigned int i = 0, idx, len = 0;
	struct bio *bio;

	rq_for_each_bio(bio, req) {
		struct bio_vec *bvec;
		bio_for_each_segment(bvec, bio, idx) {
			BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
			BUG_ON(!bvec->bv_len);
			dma->addr[i] = page_to_phys(bvec->bv_page)
				+ bvec->bv_offset;
			dma->len[i] = bvec->bv_len;
			len += bvec->bv_len;
			i++;
		}
	}
	if (i < LGUEST_MAX_DMA_SECTIONS)
		dma->len[i] = 0;
	return len;
}

static void empty_dma(struct lguest_dma *dma)
{
	dma->len[0] = 0;
}

static void setup_req(struct blockdev *bd,
		      int type, struct request *req, struct lguest_dma *dma)
{
	bd->lb_page->type = type;
	bd->lb_page->sector = req->sector;
	bd->lb_page->result = 0;
	bd->req = req;
	bd->lb_page->bytes = req_to_dma(req, dma);
}

static void do_write(struct blockdev *bd, struct request *req)
{
	struct lguest_dma send;

	pr_debug("lgb: WRITE sector %li\n", (long)req->sector);
	setup_req(bd, 1, req, &send);

	lguest_send_dma(bd->phys_addr, &send);
}

static void do_read(struct blockdev *bd, struct request *req)
{
	struct lguest_dma ping;

	pr_debug("lgb: READ sector %li\n", (long)req->sector);
	setup_req(bd, 0, req, &bd->dma);

	empty_dma(&ping);
	lguest_send_dma(bd->phys_addr, &ping);
}

static void do_lgb_request(request_queue_t *q)
{
	struct blockdev *bd;
	struct request *req;

again:
	req = elv_next_request(q);
	if (!req)
		return;

	bd = req->rq_disk->private_data;
	/* Sometimes we get repeated requests after blk_stop_queue. */
	if (bd->req)
		return;

	if (!blk_fs_request(req)) {
		pr_debug("Got non-command 0x%08x\n", req->cmd_type);
		req->errors++;
		end_entire_request(req, 0);
		goto again;
	}

	if (rq_data_dir(req) == WRITE)
		do_write(bd, req);
	else
		do_read(bd, req);

	/* Wait for interrupt to tell us it's done. */
	blk_stop_queue(q);
}

static struct block_device_operations lguestblk_fops = {
	.owner = THIS_MODULE,
};

static int lguestblk_probe(struct lguest_device *lgdev)
{
	struct blockdev *bd;
	int err;
	int irqflags = IRQF_SHARED;

	bd = kmalloc(sizeof(*bd), GFP_KERNEL);
	if (!bd)
		return -ENOMEM;

	spin_lock_init(&bd->lock);
	bd->irq = lgdev_irq(lgdev);
	bd->req = NULL;
	bd->dma.used_len = 0;
	bd->dma.len[0] = 0;
	bd->phys_addr = (lguest_devices[lgdev->index].pfn << PAGE_SHIFT);

	bd->lb_page = lguest_map(bd->phys_addr, 1);
	if (!bd->lb_page) {
		err = -ENOMEM;
		goto out_free_bd;
	}

	bd->major = register_blkdev(0, "lguestblk");
	if (bd->major < 0) {
		err = bd->major;
		goto out_unmap;
	}

	bd->disk = alloc_disk(1);
	if (!bd->disk) {
		err = -ENOMEM;
		goto out_unregister_blkdev;
	}

	bd->disk->queue = blk_init_queue(do_lgb_request, &bd->lock);
	if (!bd->disk->queue) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	/* We can only handle a certain number of sg entries */
	blk_queue_max_hw_segments(bd->disk->queue, LGUEST_MAX_DMA_SECTIONS);
	/* Buffers must not cross page boundaries */
	blk_queue_segment_boundary(bd->disk->queue, PAGE_SIZE-1);

	sprintf(bd->disk->disk_name, "lgb%c", next_block_index++);
	if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
		irqflags |= IRQF_SAMPLE_RANDOM;
	err = request_irq(bd->irq, lgb_irq, irqflags, bd->disk->disk_name, bd);
	if (err)
		goto out_cleanup_queue;

	err = lguest_bind_dma(bd->phys_addr, &bd->dma, 1, bd->irq);
	if (err)
		goto out_free_irq;

	bd->disk->major = bd->major;
	bd->disk->first_minor = 0;
	bd->disk->private_data = bd;
	bd->disk->fops = &lguestblk_fops;
	/* This is initialized to the disk size by the other end. */
	set_capacity(bd->disk, bd->lb_page->num_sectors);
	add_disk(bd->disk);

	printk(KERN_INFO "%s: device %i at major %d\n",
	       bd->disk->disk_name, lgdev->index, bd->major);

	lgdev->private = bd;
	return 0;

out_free_irq:
	free_irq(bd->irq, bd);
out_cleanup_queue:
	blk_cleanup_queue(bd->disk->queue);
out_put_disk:
	put_disk(bd->disk);
out_unregister_blkdev:
	unregister_blkdev(bd->major, "lguestblk");
out_unmap:
	lguest_unmap(bd->lb_page);
out_free_bd:
	kfree(bd);
	return err;
}

static struct lguest_driver lguestblk_drv = {
	.name = "lguestblk",
	.owner = THIS_MODULE,
	.device_type = LGUEST_DEVICE_T_BLOCK,
	.probe = lguestblk_probe,
};

static __init int lguestblk_init(void)
{
	return register_lguest_driver(&lguestblk_drv);
}
module_init(lguestblk_init);

MODULE_DESCRIPTION("Lguest block driver");
MODULE_LICENSE("GPL");