author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 12:03:07 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 12:03:07 -0400
commit    0d6810091cdbd05efeb31654c6a41a6cbdfdd2c8 (patch)
tree      44d79f8133ea6acd791fe4f32188789c2c65da93 /drivers
parent    a98ce5c6feead6bfedefabd46cb3d7f5be148d9a (diff)
parent    43d33b21a03d3abcc8cbdeb4d52bc4568f822c5e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest: (45 commits)
  Use "struct boot_params" in example launcher
  Loading bzImage directly.
  Revert lguest magic and use hook in head.S
  Update lguest documentation to reflect the new virtual block device name.
  generalize lgread_u32/lgwrite_u32.
  Example launcher handle guests not being ready for input
  Update example launcher for virtio
  Lguest support for Virtio
  Remove old lguest I/O infrastructure.
  Remove old lguest bus and drivers.
  Virtio helper routines for a descriptor ringbuffer implementation
  Module autoprobing support for virtio drivers.
  Virtio console driver
  Block driver using virtio.
  Net driver using virtio
  Virtio interface
  Boot with virtual == physical to get closer to native Linux.
  Allow guest to specify syscall vector to use.
  Rename "cr3" to "gpgdir" to avoid x86-specific naming.
  Pagetables to use normal kernel types
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                        |    2
-rw-r--r--  drivers/Makefile                       |    1
-rw-r--r--  drivers/block/Kconfig                  |    6
-rw-r--r--  drivers/block/Makefile                 |    2
-rw-r--r--  drivers/block/lguest_blk.c             |  421
-rw-r--r--  drivers/block/virtio_blk.c             |  308
-rw-r--r--  drivers/char/Kconfig                   |    4
-rw-r--r--  drivers/char/Makefile                  |    2
-rw-r--r--  drivers/char/hvc_lguest.c              |  177
-rw-r--r--  drivers/char/virtio_console.c          |  225
-rw-r--r--  drivers/kvm/Kconfig                    |    4
-rw-r--r--  drivers/lguest/Kconfig                 |   13
-rw-r--r--  drivers/lguest/Makefile                |   10
-rw-r--r--  drivers/lguest/core.c                  |  568
-rw-r--r--  drivers/lguest/hypercalls.c            |  177
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  |  125
-rw-r--r--  drivers/lguest/io.c                    |  626
-rw-r--r--  drivers/lguest/lg.h                    |  189
-rw-r--r--  drivers/lguest/lguest.c                | 1108
-rw-r--r--  drivers/lguest/lguest_asm.S            |   93
-rw-r--r--  drivers/lguest/lguest_bus.c            |  218
-rw-r--r--  drivers/lguest/lguest_device.c         |  373
-rw-r--r--  drivers/lguest/lguest_user.c           |  138
-rw-r--r--  drivers/lguest/page_tables.c           |  250
-rw-r--r--  drivers/lguest/segments.c              |   28
-rw-r--r--  drivers/lguest/x86/core.c              |  577
-rw-r--r--  drivers/lguest/x86/switcher_32.S (renamed from drivers/lguest/switcher.S) | 7
-rw-r--r--  drivers/net/Kconfig                    |    6
-rw-r--r--  drivers/net/Makefile                   |    2
-rw-r--r--  drivers/net/lguest_net.c               |  555
-rw-r--r--  drivers/net/virtio_net.c               |  435
-rw-r--r--  drivers/virtio/Kconfig                 |    8
-rw-r--r--  drivers/virtio/Makefile                |    2
-rw-r--r--  drivers/virtio/config.c                |   13
-rw-r--r--  drivers/virtio/virtio.c                |  189
-rw-r--r--  drivers/virtio/virtio_ring.c           |  313
36 files changed, 2911 insertions(+), 4264 deletions(-)
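
All three new guest drivers in this merge (block, console, net) drive the host through the same minimal virtqueue interface: add_buf() posts a scatter-gather buffer with an identifying token, kick() notifies the other side, and get_buf() reclaims completed buffers from the virtqueue callback. A minimal sketch of that shared pattern, assuming only the 2.6.24-era vq_ops signatures visible in the diffs below (the example_* names are illustrative, not part of this merge):

#include <linux/errno.h>
#include <linux/virtio.h>
#include <linux/scatterlist.h>

static int example_send(struct virtqueue *vq, void *buf, unsigned int len,
			void *token)
{
	struct scatterlist sg[1];

	/* One device-readable (out) segment, no device-writable (in) ones. */
	sg_init_one(sg, buf, len);
	if (vq->vq_ops->add_buf(vq, sg, 1, 0, token))
		return -ENOSPC;	/* ring full: retry when a buffer completes */

	vq->vq_ops->kick(vq);	/* tell the other side there's work */
	return 0;
}

/* From the virtqueue callback, reclaim every completed buffer: */
static void example_reclaim(struct virtqueue *vq)
{
	void *token;
	unsigned int len;

	while ((token = vq->vq_ops->get_buf(vq, &len)) != NULL) {
		/* "token" is whatever add_buf() was handed; "len" is how many
		 * bytes the other side wrote into the in segments. */
	}
}

virtio_blk.c and virtio_console.c below are exactly this pattern with driver-specific headers and buffers attached.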
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 34f40ea0ba60..f4076d9e9902 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -94,5 +94,5 @@ source "drivers/kvm/Kconfig"
 
 source "drivers/uio/Kconfig"
 
-source "drivers/lguest/Kconfig"
+source "drivers/virtio/Kconfig"
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index cfe38ffff28a..560496b43306 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_HID) += hid/
 obj-$(CONFIG_PPC_PS3)		+= ps3/
 obj-$(CONFIG_OF)		+= of/
 obj-$(CONFIG_SSB)		+= ssb/
+obj-$(CONFIG_VIRTIO)		+= virtio/
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ce4b1e484e64..4d0119ea9e35 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -425,4 +425,10 @@ config XEN_BLKDEV_FRONTEND
 	  block device driver. It communicates with a back-end driver
 	  in another domain which drives the actual block device.
 
+config VIRTIO_BLK
+	tristate "Virtio block driver (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && VIRTIO
+	---help---
+	  This is the virtual block driver for lguest.  Say Y or M.
+
 endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 014e72121b5a..7691505a2e12 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -25,10 +25,10 @@ obj-$(CONFIG_SUNVDC) += sunvdc.o
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
 obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
 obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
+obj-$(CONFIG_VIRTIO_BLK)	+= virtio_blk.o
 
 obj-$(CONFIG_VIODASD)		+= viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
 
 obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
-obj-$(CONFIG_LGUEST_BLOCK)	+= lguest_blk.o
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
deleted file mode 100644
index fa8e42341b87..000000000000
--- a/drivers/block/lguest_blk.c
+++ /dev/null
@@ -1,421 +0,0 @@
-/*D:400
- * The Guest block driver
- *
- * This is a simple block driver, which appears as /dev/lgba, lgbb, lgbc etc.
- * The mechanism is simple: we place the information about the request in the
- * device page, then use SEND_DMA (containing the data for a write, or an empty
- * "ping" DMA for a read).
-:*/
-/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-//#define DEBUG
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/lguest_bus.h>
-
-static char next_block_index = 'a';
-
-/*D:420 Here is the structure which holds all the information we need about
- * each Guest block device.
- *
- * I'm sure at this stage, you're wondering "hey, where was the adventure I was
- * promised?" and thinking "Rusty sucks, I shall say nasty things about him on
- * my blog". I think Real adventures have boring bits, too, and you're in the
- * middle of one. But it gets better. Just not quite yet. */
-struct blockdev
-{
-	/* The block queue infrastructure wants a spinlock: it is held while it
-	 * calls our block request function. We grab it in our interrupt
-	 * handler so the responses don't mess with new requests. */
-	spinlock_t lock;
-
-	/* The disk structure registered with kernel. */
-	struct gendisk *disk;
-
-	/* The major device number for this disk, and the interrupt. We only
-	 * really keep them here for completeness; we'd need them if we
-	 * supported device unplugging. */
-	int major;
-	int irq;
-
-	/* The physical address of this device's memory page */
-	unsigned long phys_addr;
-	/* The mapped memory page for convenient access. */
-	struct lguest_block_page *lb_page;
-
-	/* We only have a single request outstanding at a time: this is it. */
-	struct lguest_dma dma;
-	struct request *req;
-};
-
-/*D:495 We originally used end_request() throughout the driver, but it turns
- * out that end_request() is deprecated, and doesn't actually end the request
- * (which seems like a good reason to deprecate it!). It simply ends the first
- * bio. So if we had 3 bios in a "struct request" we would do all 3,
- * end_request(), do 2, end_request(), do 1 and end_request(): twice as much
- * work as we needed to do.
- *
- * This reinforced to me that I do not understand the block layer.
- *
- * Nonetheless, Jens Axboe gave me this nice helper to end all chunks of a
- * request. This improved disk speed by 130%. */
-static void end_entire_request(struct request *req, int uptodate)
-{
-	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
-		BUG();
-	add_disk_randomness(req->rq_disk);
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, uptodate);
-}
-
-/* I'm told there are only two stories in the world worth telling: love and
- * hate. So there used to be a love scene here like this:
- *
- *  Launcher: We could make beautiful I/O together, you and I.
- *  Guest: My, that's a big disk!
- *
- * Unfortunately, it was just too raunchy for our otherwise-gentle tale. */
-
-/*D:490 This is the interrupt handler, called when a block read or write has
- * been completed for us. */
-static irqreturn_t lgb_irq(int irq, void *_bd)
-{
-	/* We handed our "struct blockdev" as the argument to request_irq(), so
-	 * it is passed through to us here. This tells us which device we're
-	 * dealing with in case we have more than one. */
-	struct blockdev *bd = _bd;
-	unsigned long flags;
-
-	/* We weren't doing anything? Strange, but could happen if we shared
-	 * interrupts (we don't!). */
-	if (!bd->req) {
-		pr_debug("No work!\n");
-		return IRQ_NONE;
-	}
-
-	/* Not done yet? That's equally strange. */
-	if (!bd->lb_page->result) {
-		pr_debug("No result!\n");
-		return IRQ_NONE;
-	}
-
-	/* We have to grab the lock before ending the request. */
-	spin_lock_irqsave(&bd->lock, flags);
-	/* "result" is 1 for success, 2 for failure: end_entire_request() wants
-	 * to know whether this succeeded or not. */
-	end_entire_request(bd->req, bd->lb_page->result == 1);
-	/* Clear out request, it's done. */
-	bd->req = NULL;
-	/* Reset incoming DMA for next time. */
-	bd->dma.used_len = 0;
-	/* Ready for more reads or writes */
-	blk_start_queue(bd->disk->queue);
-	spin_unlock_irqrestore(&bd->lock, flags);
-
-	/* The interrupt was for us, we dealt with it. */
-	return IRQ_HANDLED;
-}
-
-/*D:480 The block layer's "struct request" contains a number of "struct bio"s,
- * each of which contains "struct bio_vec"s, each of which contains a page, an
- * offset and a length.
- *
- * Fortunately there are iterators to help us walk through the "struct
- * request". Even more fortunately, there were plenty of places to steal the
- * code from. We pack the "struct request" into our "struct lguest_dma" and
- * return the total length. */
-static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
-{
-	unsigned int i = 0, len = 0;
-	struct req_iterator iter;
-	struct bio_vec *bvec;
-
-	rq_for_each_segment(bvec, req, iter) {
-		/* We told the block layer not to give us too many. */
-		BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
-		/* If we had a zero-length segment, it would look like
-		 * the end of the data referred to by the "struct
-		 * lguest_dma", so make sure that doesn't happen. */
-		BUG_ON(!bvec->bv_len);
-		/* Convert page & offset to a physical address */
-		dma->addr[i] = page_to_phys(bvec->bv_page)
-			+ bvec->bv_offset;
-		dma->len[i] = bvec->bv_len;
-		len += bvec->bv_len;
-		i++;
-	}
-	/* If the array isn't full, we mark the end with a 0 length */
-	if (i < LGUEST_MAX_DMA_SECTIONS)
-		dma->len[i] = 0;
-	return len;
-}
-
-/* This creates an empty DMA, useful for prodding the Host without sending data
- * (ie. when we want to do a read) */
-static void empty_dma(struct lguest_dma *dma)
-{
-	dma->len[0] = 0;
-}
-
-/*D:470 Setting up a request is fairly easy: */
-static void setup_req(struct blockdev *bd,
-		      int type, struct request *req, struct lguest_dma *dma)
-{
-	/* The type is 1 (write) or 0 (read). */
-	bd->lb_page->type = type;
-	/* The sector on disk where the read or write starts. */
-	bd->lb_page->sector = req->sector;
-	/* The result is initialized to 0 (unfinished). */
-	bd->lb_page->result = 0;
-	/* The current request (so we can end it in the interrupt handler). */
-	bd->req = req;
-	/* The number of bytes: returned as a side-effect of req_to_dma(),
-	 * which packs the block layer's "struct request" into our "struct
-	 * lguest_dma" */
-	bd->lb_page->bytes = req_to_dma(req, dma);
-}
-
-/*D:450 Write is pretty straightforward: we pack the request into a "struct
- * lguest_dma", then use SEND_DMA to send the request. */
-static void do_write(struct blockdev *bd, struct request *req)
-{
-	struct lguest_dma send;
-
-	pr_debug("lgb: WRITE sector %li\n", (long)req->sector);
-	setup_req(bd, 1, req, &send);
-
-	lguest_send_dma(bd->phys_addr, &send);
-}
-
-/* Read is similar to write, except we pack the request into our receive
- * "struct lguest_dma" and send through an empty DMA just to tell the Host that
- * there's a request pending. */
-static void do_read(struct blockdev *bd, struct request *req)
-{
-	struct lguest_dma ping;
-
-	pr_debug("lgb: READ sector %li\n", (long)req->sector);
-	setup_req(bd, 0, req, &bd->dma);
-
-	empty_dma(&ping);
-	lguest_send_dma(bd->phys_addr, &ping);
-}
-
-/*D:440 This is where requests come in: we get handed the request queue and are
- * expected to pull a "struct request" off it until we've finished them or
- * we're waiting for a reply: */
-static void do_lgb_request(struct request_queue *q)
-{
-	struct blockdev *bd;
-	struct request *req;
-
-again:
-	/* This sometimes returns NULL even on the very first time around. I
-	 * wonder if it's something to do with letting elves handle the request
-	 * queue... */
-	req = elv_next_request(q);
-	if (!req)
-		return;
-
-	/* We attached the struct blockdev to the disk: get it back */
-	bd = req->rq_disk->private_data;
-	/* Sometimes we get repeated requests after blk_stop_queue(), but we
-	 * can only handle one at a time. */
-	if (bd->req)
-		return;
-
-	/* We only do reads and writes: no tricky business! */
-	if (!blk_fs_request(req)) {
-		pr_debug("Got non-command 0x%08x\n", req->cmd_type);
-		req->errors++;
-		end_entire_request(req, 0);
-		goto again;
-	}
-
-	if (rq_data_dir(req) == WRITE)
-		do_write(bd, req);
-	else
-		do_read(bd, req);
-
-	/* We've put out the request, so stop any more coming in until we get
-	 * an interrupt, which takes us to lgb_irq() to re-enable the queue. */
-	blk_stop_queue(q);
-}
-
-/*D:430 This is the "struct block_device_operations" we attach to the disk at
- * the end of lguestblk_probe(). It doesn't seem to want much. */
-static struct block_device_operations lguestblk_fops = {
-	.owner = THIS_MODULE,
-};
-
-/*D:425 Setting up a disk device seems to involve a lot of code. I'm not sure
- * quite why. I do know that the IDE code sent two or three of the maintainers
- * insane, perhaps this is the fringe of the same disease?
- *
- * As in the console code, the probe function gets handed the generic
- * lguest_device from lguest_bus.c: */
-static int lguestblk_probe(struct lguest_device *lgdev)
-{
-	struct blockdev *bd;
-	int err;
-	int irqflags = IRQF_SHARED;
-
-	/* First we allocate our own "struct blockdev" and initialize the easy
-	 * fields. */
-	bd = kmalloc(sizeof(*bd), GFP_KERNEL);
-	if (!bd)
-		return -ENOMEM;
-
-	spin_lock_init(&bd->lock);
-	bd->irq = lgdev_irq(lgdev);
-	bd->req = NULL;
-	bd->dma.used_len = 0;
-	bd->dma.len[0] = 0;
-	/* The descriptor in the lguest_devices array provided by the Host
-	 * gives the Guest the physical page number of the device's page. */
-	bd->phys_addr = (lguest_devices[lgdev->index].pfn << PAGE_SHIFT);
-
-	/* We use lguest_map() to get a pointer to the device page */
-	bd->lb_page = lguest_map(bd->phys_addr, 1);
-	if (!bd->lb_page) {
-		err = -ENOMEM;
-		goto out_free_bd;
-	}
-
-	/* We need a major device number: 0 means "assign one dynamically". */
-	bd->major = register_blkdev(0, "lguestblk");
-	if (bd->major < 0) {
-		err = bd->major;
-		goto out_unmap;
-	}
-
-	/* This allocates a "struct gendisk" where we pack all the information
-	 * about the disk which the rest of Linux sees. The argument is the
-	 * number of minor devices desired: we need one minor for the main
-	 * disk, and one for each partition. Of course, we can't possibly know
-	 * how many partitions are on the disk (add_disk does that).
-	 */
-	bd->disk = alloc_disk(16);
-	if (!bd->disk) {
-		err = -ENOMEM;
-		goto out_unregister_blkdev;
-	}
-
-	/* Every disk needs a queue for requests to come in: we set up the
-	 * queue with a callback function (the core of our driver) and the lock
-	 * to use. */
-	bd->disk->queue = blk_init_queue(do_lgb_request, &bd->lock);
-	if (!bd->disk->queue) {
-		err = -ENOMEM;
-		goto out_put_disk;
-	}
-
-	/* We can only handle a certain number of pointers in our SEND_DMA
-	 * call, so we set that with blk_queue_max_hw_segments(). This is not
-	 * to be confused with blk_queue_max_phys_segments() of course! I
-	 * know, who could possibly confuse the two?
-	 *
-	 * Well, it's simple to tell them apart: this one seems to work and the
-	 * other one didn't. */
-	blk_queue_max_hw_segments(bd->disk->queue, LGUEST_MAX_DMA_SECTIONS);
-
-	/* Due to technical limitations of our Host (and simple coding) we
-	 * can't have a single buffer which crosses a page boundary. Tell it
-	 * here. This means that our maximum request size is 16
-	 * (LGUEST_MAX_DMA_SECTIONS) pages. */
-	blk_queue_segment_boundary(bd->disk->queue, PAGE_SIZE-1);
-
-	/* We name our disk: this becomes the device name when udev does its
-	 * magic thing and creates the device node, such as /dev/lgba.
-	 * next_block_index is a global which starts at 'a'. Unfortunately
-	 * this simple increment logic means that the 27th disk will be called
-	 * "/dev/lgb{". In that case, I recommend having at least 29 disks, so
-	 * your /dev directory will be balanced. */
-	sprintf(bd->disk->disk_name, "lgb%c", next_block_index++);
-
-	/* We look to the device descriptor again to see if this device's
-	 * interrupts are expected to be random. If they are, we tell the irq
-	 * subsystem. At the moment this bit is always set. */
-	if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
-		irqflags |= IRQF_SAMPLE_RANDOM;
-
-	/* Now we have the name and irqflags, we can request the interrupt; we
-	 * give it the "struct blockdev" we have set up to pass to lgb_irq()
-	 * when there is an interrupt. */
-	err = request_irq(bd->irq, lgb_irq, irqflags, bd->disk->disk_name, bd);
-	if (err)
-		goto out_cleanup_queue;
-
-	/* We bind our one-entry DMA pool to the key for this block device so
-	 * the Host can reply to our requests. The key is equal to the
-	 * physical address of the device's page, which is conveniently
-	 * unique. */
-	err = lguest_bind_dma(bd->phys_addr, &bd->dma, 1, bd->irq);
-	if (err)
-		goto out_free_irq;
-
-	/* We finish our disk initialization and add the disk to the system. */
-	bd->disk->major = bd->major;
-	bd->disk->first_minor = 0;
-	bd->disk->private_data = bd;
-	bd->disk->fops = &lguestblk_fops;
-	/* This is initialized to the disk size by the Launcher. */
-	set_capacity(bd->disk, bd->lb_page->num_sectors);
-	add_disk(bd->disk);
-
-	printk(KERN_INFO "%s: device %i at major %d\n",
-	       bd->disk->disk_name, lgdev->index, bd->major);
-
-	/* We don't need to keep the "struct blockdev" around, but if we ever
-	 * implemented device removal, we'd need this. */
-	lgdev->private = bd;
-	return 0;
-
-out_free_irq:
-	free_irq(bd->irq, bd);
-out_cleanup_queue:
-	blk_cleanup_queue(bd->disk->queue);
-out_put_disk:
-	put_disk(bd->disk);
-out_unregister_blkdev:
-	unregister_blkdev(bd->major, "lguestblk");
-out_unmap:
-	lguest_unmap(bd->lb_page);
-out_free_bd:
-	kfree(bd);
-	return err;
-}
-
-/*D:410 The boilerplate code for registering the lguest block driver is just
- * like the console: */
-static struct lguest_driver lguestblk_drv = {
-	.name = "lguestblk",
-	.owner = THIS_MODULE,
-	.device_type = LGUEST_DEVICE_T_BLOCK,
-	.probe = lguestblk_probe,
-};
-
-static __init int lguestblk_init(void)
-{
-	return register_lguest_driver(&lguestblk_drv);
-}
-module_init(lguestblk_init);
-
-MODULE_DESCRIPTION("Lguest block driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
new file mode 100644
index 000000000000..a901eee64ba5
--- /dev/null
+++ b/drivers/block/virtio_blk.c
@@ -0,0 +1,308 @@
+//#define DEBUG
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/virtio.h>
+#include <linux/virtio_blk.h>
+
+static unsigned char virtblk_index = 'a';
+struct virtio_blk
+{
+	spinlock_t lock;
+
+	struct virtio_device *vdev;
+	struct virtqueue *vq;
+
+	/* The disk structure for the kernel. */
+	struct gendisk *disk;
+
+	/* Request tracking. */
+	struct list_head reqs;
+
+	mempool_t *pool;
+
+	/* Scatterlist: can be too big for stack. */
+	struct scatterlist sg[3+MAX_PHYS_SEGMENTS];
+};
+
+struct virtblk_req
+{
+	struct list_head list;
+	struct request *req;
+	struct virtio_blk_outhdr out_hdr;
+	struct virtio_blk_inhdr in_hdr;
+};
+
+static bool blk_done(struct virtqueue *vq)
+{
+	struct virtio_blk *vblk = vq->vdev->priv;
+	struct virtblk_req *vbr;
+	unsigned int len;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vblk->lock, flags);
+	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+		int uptodate;
+		switch (vbr->in_hdr.status) {
+		case VIRTIO_BLK_S_OK:
+			uptodate = 1;
+			break;
+		case VIRTIO_BLK_S_UNSUPP:
+			uptodate = -ENOTTY;
+			break;
+		default:
+			uptodate = 0;
+			break;
+		}
+
+		end_dequeued_request(vbr->req, uptodate);
+		list_del(&vbr->list);
+		mempool_free(vbr, vblk->pool);
+	}
+	/* In case queue is stopped waiting for more buffers. */
+	blk_start_queue(vblk->disk->queue);
+	spin_unlock_irqrestore(&vblk->lock, flags);
+	return true;
+}
+
+static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+		   struct request *req)
+{
+	unsigned long num, out, in;
+	struct virtblk_req *vbr;
+
+	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
+	if (!vbr)
+		/* When another request finishes we'll try again. */
+		return false;
+
+	vbr->req = req;
+	if (blk_fs_request(vbr->req)) {
+		vbr->out_hdr.type = 0;
+		vbr->out_hdr.sector = vbr->req->sector;
+		vbr->out_hdr.ioprio = vbr->req->ioprio;
+	} else if (blk_pc_request(vbr->req)) {
+		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
+		vbr->out_hdr.sector = 0;
+		vbr->out_hdr.ioprio = vbr->req->ioprio;
+	} else {
+		/* We don't put anything else in the queue. */
+		BUG();
+	}
+
+	if (blk_barrier_rq(vbr->req))
+		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
+
+	/* We have to zero this, otherwise blk_rq_map_sg gets upset. */
+	memset(vblk->sg, 0, sizeof(vblk->sg));
+	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
+	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
+	sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr));
+
+	if (rq_data_dir(vbr->req) == WRITE) {
+		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+		out = 1 + num;
+		in = 1;
+	} else {
+		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+		out = 1;
+		in = 1 + num;
+	}
+
+	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
+		mempool_free(vbr, vblk->pool);
+		return false;
+	}
+
+	list_add_tail(&vbr->list, &vblk->reqs);
+	return true;
+}
+
+static void do_virtblk_request(struct request_queue *q)
+{
+	struct virtio_blk *vblk = NULL;
+	struct request *req;
+	unsigned int issued = 0;
+
+	while ((req = elv_next_request(q)) != NULL) {
+		vblk = req->rq_disk->private_data;
+		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));
+
+		/* If this request fails, stop queue and wait for something to
+		   finish to restart it. */
+		if (!do_req(q, vblk, req)) {
+			blk_stop_queue(q);
+			break;
+		}
+		blkdev_dequeue_request(req);
+		issued++;
+	}
+
+	if (issued)
+		vblk->vq->vq_ops->kick(vblk->vq);
+}
+
+static int virtblk_ioctl(struct inode *inode, struct file *filp,
+			 unsigned cmd, unsigned long data)
+{
+	return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue,
+			      inode->i_bdev->bd_disk, cmd,
+			      (void __user *)data);
+}
+
+static struct block_device_operations virtblk_fops = {
+	.ioctl = virtblk_ioctl,
+	.owner = THIS_MODULE,
+};
+
+static int virtblk_probe(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk;
+	int err, major;
+	void *token;
+	unsigned int len;
+	u64 cap;
+	u32 v;
+
+	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+	if (!vblk) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&vblk->reqs);
+	spin_lock_init(&vblk->lock);
+	vblk->vdev = vdev;
+
+	/* We expect one virtqueue, for output. */
+	vblk->vq = vdev->config->find_vq(vdev, blk_done);
+	if (IS_ERR(vblk->vq)) {
+		err = PTR_ERR(vblk->vq);
+		goto out_free_vblk;
+	}
+
+	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
+	if (!vblk->pool) {
+		err = -ENOMEM;
+		goto out_free_vq;
+	}
+
+	major = register_blkdev(0, "virtblk");
+	if (major < 0) {
+		err = major;
+		goto out_mempool;
+	}
+
+	/* FIXME: How many partitions? How long is a piece of string? */
+	vblk->disk = alloc_disk(1 << 4);
+	if (!vblk->disk) {
+		err = -ENOMEM;
+		goto out_unregister_blkdev;
+	}
+
+	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+	if (!vblk->disk->queue) {
+		err = -ENOMEM;
+		goto out_put_disk;
+	}
+
+	sprintf(vblk->disk->disk_name, "vd%c", virtblk_index++);
+	vblk->disk->major = major;
+	vblk->disk->first_minor = 0;
+	vblk->disk->private_data = vblk;
+	vblk->disk->fops = &virtblk_fops;
+
+	/* If barriers are supported, tell block layer that queue is ordered */
+	token = vdev->config->find(vdev, VIRTIO_CONFIG_BLK_F, &len);
+	if (virtio_use_bit(vdev, token, len, VIRTIO_BLK_F_BARRIER))
+		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
+
+	err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_CAPACITY, &cap);
+	if (err) {
+		dev_err(&vdev->dev, "Bad/missing capacity in config\n");
+		goto out_put_disk;
+	}
+
+	/* If capacity is too big, truncate with warning. */
+	if ((sector_t)cap != cap) {
+		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
+			 (unsigned long long)cap);
+		cap = (sector_t)-1;
+	}
+	set_capacity(vblk->disk, cap);
+
+	err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_SIZE_MAX, &v);
+	if (!err)
+		blk_queue_max_segment_size(vblk->disk->queue, v);
+	else if (err != -ENOENT) {
+		dev_err(&vdev->dev, "Bad SIZE_MAX in config\n");
+		goto out_put_disk;
+	}
+
+	err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_SEG_MAX, &v);
+	if (!err)
+		blk_queue_max_hw_segments(vblk->disk->queue, v);
+	else if (err != -ENOENT) {
+		dev_err(&vdev->dev, "Bad SEG_MAX in config\n");
+		goto out_put_disk;
+	}
+
+	add_disk(vblk->disk);
+	return 0;
+
+out_put_disk:
+	put_disk(vblk->disk);
+out_unregister_blkdev:
+	unregister_blkdev(major, "virtblk");
+out_mempool:
+	mempool_destroy(vblk->pool);
+out_free_vq:
+	vdev->config->del_vq(vblk->vq);
+out_free_vblk:
+	kfree(vblk);
+out:
+	return err;
+}
+
+static void virtblk_remove(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk = vdev->priv;
+	int major = vblk->disk->major;
+
+	BUG_ON(!list_empty(&vblk->reqs));
+	blk_cleanup_queue(vblk->disk->queue);
+	put_disk(vblk->disk);
+	unregister_blkdev(major, "virtblk");
+	mempool_destroy(vblk->pool);
+	kfree(vblk);
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_blk = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtblk_probe,
+	.remove = __devexit_p(virtblk_remove),
+};
+
+static int __init init(void)
+{
+	return register_virtio_driver(&virtio_blk);
+}
+
+static void __exit fini(void)
+{
+	unregister_virtio_driver(&virtio_blk);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio block driver");
+MODULE_LICENSE("GPL");
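
A note on the descriptor layout do_req() builds above: the guest-filled out_hdr always travels in the device-readable (out) part, the host-filled in_hdr (status) always in the device-writable (in) part, and the data segments swap sides with the transfer direction. A hedged helper making that arithmetic explicit (virtblk_segment_split is an illustrative name, not in the patch):

/* Illustrative only: mirrors the out/in split do_req() computes inline.
 * For a request mapped into `num` data segments:
 *   WRITE:  [out_hdr][data ...] | [in_hdr]      -> out = 1 + num, in = 1
 *   READ:   [out_hdr] | [data ...][in_hdr]      -> out = 1, in = 1 + num
 */
static void virtblk_segment_split(int is_write, unsigned long num,
				  unsigned long *out, unsigned long *in)
{
	if (is_write) {
		*out = 1 + num;	/* header + payload are host-readable */
		*in = 1;	/* only the status byte comes back */
	} else {
		*out = 1;	/* header only */
		*in = 1 + num;	/* payload + status are host-written */
	}
}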
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 65491103e0fb..bf18d757b876 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -613,6 +613,10 @@ config HVC_XEN
 	help
 	  Xen virtual console device driver
 
+config VIRTIO_CONSOLE
+	bool
+	select HVC_DRIVER
+
 config HVCS
 	tristate "IBM Hypervisor Virtual Console Server support"
 	depends on PPC_PSERIES
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index c78ff26647ee..07304d50e0cb 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
 obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
-obj-$(CONFIG_LGUEST_GUEST)	+= hvc_lguest.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
 obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
@@ -50,6 +49,7 @@ obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
 obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
 obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
+obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_MSPEC)		+= mspec.o
diff --git a/drivers/char/hvc_lguest.c b/drivers/char/hvc_lguest.c
deleted file mode 100644
index efccb2155830..000000000000
--- a/drivers/char/hvc_lguest.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*D:300
- * The Guest console driver
- *
- * This is a trivial console driver: we use lguest's DMA mechanism to send
- * bytes out, and register a DMA buffer to receive bytes in. It is assumed to
- * be present and available from the very beginning of boot.
- *
- * Writing console drivers is one of the few remaining Dark Arts in Linux.
- * Fortunately for us, the path of virtual consoles has been well-trodden by
- * the PowerPC folks, who wrote "hvc_console.c" to generically support any
- * virtual console. We use that infrastructure which only requires us to write
- * the basic put_chars and get_chars functions and call the right register
- * functions.
-:*/
-
-/*M:002 The console can be flooded: while the Guest is processing input the
- * Host can send more. Buffering in the Host could alleviate this, but it is a
- * difficult problem in general. :*/
-/* Copyright (C) 2006 Rusty Russell, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/lguest_bus.h>
-#include <asm/paravirt.h>
-#include "hvc_console.h"
-
-/*D:340 This is our single console input buffer, with associated "struct
- * lguest_dma" referring to it. Note the 0-terminated length array, and the
- * use of physical address for the buffer itself. */
-static char inbuf[256];
-static struct lguest_dma cons_input = { .used_len = 0,
-					.addr[0] = __pa(inbuf),
-					.len[0] = sizeof(inbuf),
-					.len[1] = 0 };
-
-/*D:310 The put_chars() callback is pretty straightforward.
- *
- * First we put the pointer and length in a "struct lguest_dma": we only have
- * one pointer, so we set the second length to 0. Then we use SEND_DMA to send
- * the data to (Host) buffers attached to the console key. Usually a device's
- * key is a physical address within the device's memory, but because the
- * console device doesn't have any associated physical memory, we use the
- * LGUEST_CONSOLE_DMA_KEY constant (aka 0). */
-static int put_chars(u32 vtermno, const char *buf, int count)
-{
-	struct lguest_dma dma;
-
-	/* FIXME: DMA buffers in a "struct lguest_dma" are not allowed
-	 * to go over page boundaries. This never seems to happen,
-	 * but if it did we'd need to fix this code. */
-	dma.len[0] = count;
-	dma.len[1] = 0;
-	dma.addr[0] = __pa(buf);
-
-	lguest_send_dma(LGUEST_CONSOLE_DMA_KEY, &dma);
-	/* We're expected to return the amount of data we wrote: all of it. */
-	return count;
-}
-
-/*D:350 get_chars() is the callback from the hvc_console infrastructure when
- * an interrupt is received.
- *
- * Firstly we see if our buffer has been filled: if not, we return. The rest
- * of the code deals with the fact that the hvc_console() infrastructure only
- * asks us for 16 bytes at a time. We keep a "cons_offset" variable for
- * partially-read buffers. */
-static int get_chars(u32 vtermno, char *buf, int count)
-{
-	static int cons_offset;
-
-	/* Nothing left to see here... */
-	if (!cons_input.used_len)
-		return 0;
-
-	/* You want more than we have to give? Well, try wanting less! */
-	if (cons_input.used_len - cons_offset < count)
-		count = cons_input.used_len - cons_offset;
-
-	/* Copy across to their buffer and increment offset. */
-	memcpy(buf, inbuf + cons_offset, count);
-	cons_offset += count;
-
-	/* Finished? Zero offset, and reset cons_input so Host will use it
-	 * again. */
-	if (cons_offset == cons_input.used_len) {
-		cons_offset = 0;
-		cons_input.used_len = 0;
-	}
-	return count;
-}
-/*:*/
-
-static struct hv_ops lguest_cons = {
-	.get_chars = get_chars,
-	.put_chars = put_chars,
-};
-
-/*D:320 Console drivers are initialized very early so boot messages can go
- * out. At this stage, the console is output-only. Our driver checks we're a
- * Guest, and if so hands hvc_instantiate() the console number (0), priority
- * (0), and the struct hv_ops containing the put_chars() function. */
-static int __init cons_init(void)
-{
-	if (strcmp(pv_info.name, "lguest") != 0)
-		return 0;
-
-	return hvc_instantiate(0, 0, &lguest_cons);
-}
-console_initcall(cons_init);
-
-/*D:370 To set up and manage our virtual console, we call hvc_alloc() and
- * stash the result in the private pointer of the "struct lguest_device".
- * Since we never remove the console device we never need this pointer again,
- * but using ->private is considered good form, and you never know who's going
- * to copy your driver.
- *
- * Once the console is set up, we bind our input buffer ready for input. */
-static int lguestcons_probe(struct lguest_device *lgdev)
-{
-	int err;
-
-	/* The first argument of hvc_alloc() is the virtual console number, so
-	 * we use zero. The second argument is the interrupt number.
-	 *
-	 * The third argument is a "struct hv_ops" containing the put_chars()
-	 * and get_chars() pointers. The final argument is the output buffer
-	 * size: we use 256 and expect the Host to have room for us to send
-	 * that much. */
-	lgdev->private = hvc_alloc(0, lgdev_irq(lgdev), &lguest_cons, 256);
-	if (IS_ERR(lgdev->private))
-		return PTR_ERR(lgdev->private);
-
-	/* We bind a single DMA buffer at key LGUEST_CONSOLE_DMA_KEY.
-	 * "cons_input" is that statically-initialized global DMA buffer we saw
-	 * above, and we also give the interrupt we want. */
-	err = lguest_bind_dma(LGUEST_CONSOLE_DMA_KEY, &cons_input, 1,
-			      lgdev_irq(lgdev));
-	if (err)
-		printk("lguest console: failed to bind buffer.\n");
-	return err;
-}
-/* Note the use of lgdev_irq() for the interrupt number. We tell hvc_alloc()
- * to expect input when this interrupt is triggered, and then tell
- * lguest_bind_dma() that is the interrupt to send us when input comes in. */
-
-/*D:360 From now on the console driver follows standard Guest driver form:
- * register_lguest_driver() registers the device type and probe function, and
- * the probe function sets up the device.
- *
- * The standard "struct lguest_driver": */
-static struct lguest_driver lguestcons_drv = {
-	.name = "lguestcons",
-	.owner = THIS_MODULE,
-	.device_type = LGUEST_DEVICE_T_CONSOLE,
-	.probe = lguestcons_probe,
-};
-
-/* The standard init function */
-static int __init hvc_lguest_init(void)
-{
-	return register_lguest_driver(&lguestcons_drv);
-}
-module_init(hvc_lguest_init);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
new file mode 100644
index 000000000000..100e8a201e3a
--- /dev/null
+++ b/drivers/char/virtio_console.c
@@ -0,0 +1,225 @@
+/*D:300
+ * The Guest console driver
+ *
+ * Writing console drivers is one of the few remaining Dark Arts in Linux.
+ * Fortunately for us, the path of virtual consoles has been well-trodden by
+ * the PowerPC folks, who wrote "hvc_console.c" to generically support any
+ * virtual console. We use that infrastructure which only requires us to write
+ * the basic put_chars and get_chars functions and call the right register
+ * functions.
+:*/
+
+/*M:002 The console can be flooded: while the Guest is processing input the
+ * Host can send more. Buffering in the Host could alleviate this, but it is a
+ * difficult problem in general. :*/
+/* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/virtio.h>
+#include <linux/virtio_console.h>
+#include "hvc_console.h"
+
+/*D:340 These represent our input and output console queues, and the virtio
+ * operations for them. */
+static struct virtqueue *in_vq, *out_vq;
+static struct virtio_device *vdev;
+
+/* This is our input buffer, and how much data is left in it. */
+static unsigned int in_len;
+static char *in, *inbuf;
+
+/* The operations for our console. */
+static struct hv_ops virtio_cons;
+
+/*D:310 The put_chars() callback is pretty straightforward.
+ *
+ * We turn the characters into a scatter-gather list, add it to the output
+ * queue and then kick the Host. Then we sit here waiting for it to finish:
+ * inefficient in theory, but in practice implementations will do it
+ * immediately (lguest's Launcher does). */
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+	struct scatterlist sg[1];
+	unsigned int len;
+
+	/* This is a convenient routine to initialize a single-elem sg list */
+	sg_init_one(sg, buf, count);
+
+	/* add_buf wants a token to identify this buffer: we hand it any
+	 * non-NULL pointer, since there's only ever one buffer. */
+	if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) {
+		/* Tell Host to go! */
+		out_vq->vq_ops->kick(out_vq);
+		/* Chill out until it's done with the buffer. */
+		while (!out_vq->vq_ops->get_buf(out_vq, &len))
+			cpu_relax();
+	}
+
+	/* We're expected to return the amount of data we wrote: all of it. */
+	return count;
+}
+
+/* Create a scatter-gather list representing our input buffer and put it in the
+ * queue. */
+static void add_inbuf(void)
+{
+	struct scatterlist sg[1];
+	sg_init_one(sg, inbuf, PAGE_SIZE);
+
+	/* We should always be able to add one buffer to an empty queue. */
+	if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) != 0)
+		BUG();
+	in_vq->vq_ops->kick(in_vq);
+}
+
+/*D:350 get_chars() is the callback from the hvc_console infrastructure when
+ * an interrupt is received.
+ *
+ * Most of the code deals with the fact that the hvc_console() infrastructure
+ * only asks us for 16 bytes at a time. We keep in_offset and in_used fields
+ * for partially-filled buffers. */
+static int get_chars(u32 vtermno, char *buf, int count)
+{
+	/* If we don't have an input queue yet, we can't get input. */
+	BUG_ON(!in_vq);
+
+	/* No buffer? Try to get one. */
+	if (!in_len) {
+		in = in_vq->vq_ops->get_buf(in_vq, &in_len);
+		if (!in)
+			return 0;
+	}
+
+	/* You want more than we have to give? Well, try wanting less! */
+	if (in_len < count)
+		count = in_len;
+
+	/* Copy across to their buffer and increment offset. */
+	memcpy(buf, in, count);
+	in += count;
+	in_len -= count;
+
+	/* Finished? Re-register buffer so Host will use it again. */
+	if (in_len == 0)
+		add_inbuf();
+
+	return count;
+}
+/*:*/
+
+/*D:320 Console drivers are initialized very early so boot messages can go out,
+ * so we do things slightly differently from the generic virtio initialization
+ * of the net and block drivers.
+ *
+ * At this stage, the console is output-only. It's too early to set up a
+ * virtqueue, so we let the drivers do some boutique early-output thing. */
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
+{
+	virtio_cons.put_chars = put_chars;
+	return hvc_instantiate(0, 0, &virtio_cons);
+}
+
+/*D:370 Once we're further in boot, we get probed like any other virtio device.
+ * At this stage we set up the output virtqueue.
+ *
+ * To set up and manage our virtual console, we call hvc_alloc(). Since we
+ * never remove the console device we never need this pointer again.
+ *
+ * Finally we put our input buffer in the input queue, ready to receive. */
+static int virtcons_probe(struct virtio_device *dev)
+{
+	int err;
+	struct hvc_struct *hvc;
+
+	vdev = dev;
+
+	/* This is the scratch page we use to receive console input */
+	inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!inbuf) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* Find the input queue. */
+	/* FIXME: This is why we want to wean off hvc: we do nothing
+	 * when input comes in. */
+	in_vq = vdev->config->find_vq(vdev, NULL);
+	if (IS_ERR(in_vq)) {
+		err = PTR_ERR(in_vq);
+		goto free;
+	}
+
+	out_vq = vdev->config->find_vq(vdev, NULL);
+	if (IS_ERR(out_vq)) {
+		err = PTR_ERR(out_vq);
+		goto free_in_vq;
+	}
+
+	/* Start using the new console output. */
+	virtio_cons.get_chars = get_chars;
+	virtio_cons.put_chars = put_chars;
+
+	/* The first argument of hvc_alloc() is the virtual console number, so
+	 * we use zero. The second argument is the interrupt number; we
+	 * currently leave this as zero: it would be better not to use the
+	 * hvc mechanism and fix this (FIXME!).
+	 *
+	 * The third argument is a "struct hv_ops" containing the put_chars()
+	 * and get_chars() pointers. The final argument is the output buffer
+	 * size: we can do any size, so we put PAGE_SIZE here. */
+	hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
+	if (IS_ERR(hvc)) {
+		err = PTR_ERR(hvc);
+		goto free_out_vq;
+	}
+
+	/* Register the input buffer the first time. */
+	add_inbuf();
+	return 0;
+
+free_out_vq:
+	vdev->config->del_vq(out_vq);
+free_in_vq:
+	vdev->config->del_vq(in_vq);
+free:
+	kfree(inbuf);
+fail:
+	return err;
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_console = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtcons_probe,
+};
+
+static int __init init(void)
+{
+	return register_virtio_driver(&virtio_console);
+}
+module_init(init);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio console driver");
+MODULE_LICENSE("GPL");
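
The early-init hook above exists so boot code can print before any virtqueue is set up. A hedged sketch of how a hypervisor port might wire it in (example_early_put_chars and example_hcall_write are illustrative stand-ins, not part of this merge):

/* Sketch only: a paravirt boot path hands hvc an early, output-only path.
 * example_hcall_write() stands in for whatever synchronous write primitive
 * the hypervisor offers before virtqueues exist. */
static int example_early_put_chars(u32 vtermno, const char *buf, int count)
{
	example_hcall_write(__pa(buf), count);	/* assumption: host-side hook */
	return count;	/* hvc expects the number of bytes consumed */
}

static void __init example_console_setup(void)
{
	/* Console 0 now works for printk; virtcons_probe() later swaps in
	 * the real virtqueue-backed get_chars/put_chars. */
	virtio_cons_early_init(example_early_put_chars);
}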
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 8749fa4ffcee..656920636cb2 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -47,4 +47,8 @@ config KVM_AMD
 	  Provides support for KVM on AMD processors equipped with the AMD-V
 	  (SVM) extensions.
 
+# OK, it's a little counter-intuitive to do this, but it puts it neatly under
+# the virtualization menu.
+source drivers/lguest/Kconfig
+
 endif # VIRTUALIZATION
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 41e2250613a1..7eb9ecff8f4a 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,7 +1,6 @@
 config LGUEST
 	tristate "Linux hypervisor example code"
-	depends on X86 && PARAVIRT && EXPERIMENTAL && !X86_PAE && FUTEX
-	select LGUEST_GUEST
+	depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !(X86_VISWS || X86_VOYAGER)
 	select HVC_DRIVER
 	---help---
 	  This is a very simple module which allows you to run
@@ -18,13 +17,3 @@ config LGUEST_GUEST
 	  The guest needs code built-in, even if the host has lguest
 	  support as a module. The drivers are tiny, so we build them
 	  in too.
-
-config LGUEST_NET
-	tristate
-	default y
-	depends on LGUEST_GUEST && NET
-
-config LGUEST_BLOCK
-	tristate
-	default y
-	depends on LGUEST_GUEST && BLOCK
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index e5047471c334..5e8272d296d8 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -1,10 +1,12 @@
-# Guest requires the paravirt_ops replacement and the bus driver.
-obj-$(CONFIG_LGUEST_GUEST) += lguest.o lguest_asm.o lguest_bus.o
+# Guest requires the device configuration and probing code.
+obj-$(CONFIG_LGUEST_GUEST) += lguest_device.o
 
 # Host requires the other files, which can be a module.
 obj-$(CONFIG_LGUEST)	+= lg.o
-lg-y := core.o hypercalls.o page_tables.o interrupts_and_traps.o \
-	segments.o io.o lguest_user.o switcher.o
+lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \
+	segments.o lguest_user.o
+
+lg-$(CONFIG_X86_32) += x86/switcher_32.o x86/core.o
 
 Preparation Preparation!: PREFIX=P
 Guest: PREFIX=G
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index a0788c12b392..35d19ae58de7 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -11,58 +11,20 @@
 #include <linux/vmalloc.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
+#include <linux/highmem.h>
 #include <asm/paravirt.h>
-#include <asm/desc.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/poll.h>
-#include <asm/highmem.h>
 #include <asm/asm-offsets.h>
-#include <asm/i387.h>
 #include "lg.h"
 
-/* Found in switcher.S */
-extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
-extern unsigned long default_idt_entries[];
-
-/* Every guest maps the core switcher code. */
-#define SHARED_SWITCHER_PAGES \
-	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
-/* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
-
-/* We map at -4M for ease of mapping into the guest (one PTE page). */
-#define SWITCHER_ADDR 0xFFC00000
 
 static struct vm_struct *switcher_vma;
 static struct page **switcher_page;
 
-static int cpu_had_pge;
-static struct {
-	unsigned long offset;
-	unsigned short segment;
-} lguest_entry;
-
 /* This One Big lock protects all inter-guest data structures. */
 DEFINE_MUTEX(lguest_lock);
-static DEFINE_PER_CPU(struct lguest *, last_guest);
-
-/* FIXME: Make dynamic. */
-#define MAX_LGUEST_GUESTS 16
-struct lguest lguests[MAX_LGUEST_GUESTS];
-
-/* Offset from where switcher.S was compiled to where we've copied it */
-static unsigned long switcher_offset(void)
-{
-	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
-}
-
-/* This cpu's struct lguest_pages. */
-static struct lguest_pages *lguest_pages(unsigned int cpu)
-{
-	return &(((struct lguest_pages *)
-		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
-}
 
 /*H:010 We need to set up the Switcher at a high virtual address. Remember the
  * Switcher is a few hundred bytes of assembler code which actually changes the
@@ -73,9 +35,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
  * Host since it will be running as the switchover occurs.
  *
  * Trying to map memory at a particular address is an unusual thing to do, so
- * it's not a simple one-liner. We also set up the per-cpu parts of the
- * Switcher here.
- */
+ * it's not a simple one-liner. */
 static __init int map_switcher(void)
 {
 	int i, err;
@@ -132,90 +92,11 @@ static __init int map_switcher(void)
 		goto free_vma;
 	}
 
-	/* Now the switcher is mapped at the right address, we can't fail!
-	 * Copy in the compiled-in Switcher code (from switcher.S). */
+	/* Now the Switcher is mapped at the right address, we can't fail!
+	 * Copy in the compiled-in Switcher code (from <arch>_switcher.S). */
 	memcpy(switcher_vma->addr, start_switcher_text,
 	       end_switcher_text - start_switcher_text);
 
-	/* Most of the switcher.S doesn't care that it's been moved; on Intel,
-	 * jumps are relative, and it doesn't access any references to external
-	 * code or data.
-	 *
-	 * The only exception is the interrupt handlers in switcher.S: their
-	 * addresses are placed in a table (default_idt_entries), so we need to
-	 * update the table with the new addresses. switcher_offset() is a
-	 * convenience function which returns the distance between the builtin
-	 * switcher code and the high-mapped copy we just made. */
-	for (i = 0; i < IDT_ENTRIES; i++)
-		default_idt_entries[i] += switcher_offset();
-
-	/*
-	 * Set up the Switcher's per-cpu areas.
-	 *
-	 * Each CPU gets two pages of its own within the high-mapped region
-	 * (aka. "struct lguest_pages"). Much of this can be initialized now,
-	 * but some depends on what Guest we are running (which is set up in
-	 * copy_in_guest_info()).
-	 */
-	for_each_possible_cpu(i) {
-		/* lguest_pages() returns this CPU's two pages. */
-		struct lguest_pages *pages = lguest_pages(i);
-		/* This is a convenience pointer to make the code fit one
-		 * statement to a line. */
-		struct lguest_ro_state *state = &pages->state;
-
-		/* The Global Descriptor Table: the Host has a different one
-		 * for each CPU. We keep a descriptor for the GDT which says
-		 * where it is and how big it is (the size is actually the last
-		 * byte, not the size, hence the "-1"). */
-		state->host_gdt_desc.size = GDT_SIZE-1;
-		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
-
-		/* All CPUs on the Host use the same Interrupt Descriptor
-		 * Table, so we just use store_idt(), which gets this CPU's IDT
-		 * descriptor. */
-		store_idt(&state->host_idt_desc);
-
-		/* The descriptors for the Guest's GDT and IDT can be filled
-		 * out now, too. We copy the GDT & IDT into ->guest_gdt and
-		 * ->guest_idt before actually running the Guest. */
-		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
-		state->guest_idt_desc.address = (long)&state->guest_idt;
-		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
-		state->guest_gdt_desc.address = (long)&state->guest_gdt;
-
-		/* We know where we want the stack to be when the Guest enters
-		 * the switcher: in pages->regs. The stack grows upwards, so
-		 * we start it at the end of that structure. */
-		state->guest_tss.esp0 = (long)(&pages->regs + 1);
-		/* And this is the GDT entry to use for the stack: we keep a
-		 * couple of special LGUEST entries. */
-		state->guest_tss.ss0 = LGUEST_DS;
-
-		/* x86 can have a finegrained bitmap which indicates what I/O
-		 * ports the process can use. We set it to the end of our
-		 * structure, meaning "none". */
-		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
-
-		/* Some GDT entries are the same across all Guests, so we can
-		 * set them up now. */
-		setup_default_gdt_entries(state);
-		/* Most IDT entries are the same for all Guests, too.*/
-		setup_default_idt_entries(state, default_idt_entries);
-
-		/* The Host needs to be able to use the LGUEST segments on this
-		 * CPU, too, so put them in the Host GDT. */
-		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
-		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
-	}
-
-	/* In the Switcher, we want the %cs segment register to use the
-	 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
-	 * it will be undisturbed when we switch. To change %cs and jump we
-	 * need this structure to feed to Intel's "lcall" instruction. */
-	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
-	lguest_entry.segment = LGUEST_CS;
-
 	printk(KERN_INFO "lguest: mapped switcher at %p\n",
 	       switcher_vma->addr);
 	/* And we succeeded... */
@@ -247,86 +128,12 @@ static void unmap_switcher(void)
247 __free_pages(switcher_page[i], 0); 128 __free_pages(switcher_page[i], 0);
248} 129}
249 130
250/*H:130 Our Guest is usually so well behaved; it never tries to do things it
251 * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't
252 * quite complete, because it doesn't contain replacements for the Intel I/O
253 * instructions. As a result, the Guest sometimes stumbles across one during
254 * the boot process as it probes for various things which are usually attached
255 * to a PC.
256 *
257 * When the Guest uses one of these instructions, we get trap #13 (General
258 * Protection Fault) and come here. We see if it's one of those troublesome
259 * instructions and skip over it. We return true if we did. */
260static int emulate_insn(struct lguest *lg)
261{
262 u8 insn;
263 unsigned int insnlen = 0, in = 0, shift = 0;
264 /* The eip contains the *virtual* address of the Guest's instruction:
265 * guest_pa just subtracts the Guest's page_offset. */
266 unsigned long physaddr = guest_pa(lg, lg->regs->eip);
267
268 /* The guest_pa() function only works for Guest kernel addresses, but
269 * kernel-mode instructions are all we're trying to emulate anyway. */
270 if (lg->regs->eip < lg->page_offset)
271 return 0;
272
273 /* Decoding x86 instructions is icky. */
274 lgread(lg, &insn, physaddr, 1);
275
276 /* 0x66 is an "operand-size prefix": the instruction works on 16-bit
277 operands rather than the full 32-bit eax register. */
278 if (insn == 0x66) {
279 shift = 16;
280 /* The instruction is 1 byte so far, read the next byte. */
281 insnlen = 1;
282 lgread(lg, &insn, physaddr + insnlen, 1);
283 }
284
285 /* We can ignore the lower bit for the moment and decode the 4 opcodes
286 * we need to emulate. */
287 switch (insn & 0xFE) {
288 case 0xE4: /* in <next byte>,%al */
289 insnlen += 2;
290 in = 1;
291 break;
292 case 0xEC: /* in (%dx),%al */
293 insnlen += 1;
294 in = 1;
295 break;
296 case 0xE6: /* out %al,<next byte> */
297 insnlen += 2;
298 break;
299 case 0xEE: /* out %al,(%dx) */
300 insnlen += 1;
301 break;
302 default:
303 /* OK, we don't know what this is, can't emulate. */
304 return 0;
305 }
306
307 /* If it was an "IN" instruction, they expect the result to be read
308 * into %eax, so we change %eax. We always return all-ones, which
309 * traditionally means "there's nothing there". */
310 if (in) {
311 /* Lower bit tells us whether it's a 16 or 32 bit access */
312 if (insn & 0x1)
313 lg->regs->eax = 0xFFFFFFFF;
314 else
315 lg->regs->eax |= (0xFFFF << shift);
316 }
317 /* Finally, we've "done" the instruction, so move past it. */
318 lg->regs->eip += insnlen;
319 /* Success! */
320 return 1;
321}
322/*:*/
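
The decode above only ever has to recognise four opcode pairs plus the optional 0x66 prefix. For readers who want to poke at it outside the kernel, a hedged userspace re-sketch of the same decode logic (not the kernel function itself):

    /* Standalone sketch of the IN/OUT decode: returns the instruction
     * length if it is one of the four emulatable forms, 0 otherwise. */
    #include <stdio.h>
    #include <stdint.h>

    static unsigned decode_inout(const uint8_t *insn, int *is_in)
    {
        unsigned len = 0;

        *is_in = 0;
        if (insn[0] == 0x66)    /* operand-size prefix */
            len = 1;

        switch (insn[len] & 0xFE) {
        case 0xE4: *is_in = 1; return len + 2; /* in <imm8>,%al  */
        case 0xEC: *is_in = 1; return len + 1; /* in (%dx),%al   */
        case 0xE6: return len + 2;             /* out %al,<imm8> */
        case 0xEE: return len + 1;             /* out %al,(%dx)  */
        default:   return 0;                   /* can't emulate  */
        }
    }

    int main(void)
    {
        const uint8_t code[] = { 0x66, 0xED }; /* in (%dx),%ax */
        int in;
        unsigned len = decode_inout(code, &in);

        printf("len=%u is_in=%d\n", len, in);  /* len=2 is_in=1 */
        return 0;
    }
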
323
324/*L:305 131/*L:305
325 * Dealing With Guest Memory. 132 * Dealing With Guest Memory.
326 * 133 *
327 * When the Guest gives us (what it thinks is) a physical address, we can use 134 * When the Guest gives us (what it thinks is) a physical address, we can use
328 * the normal copy_from_user() & copy_to_user() on that address: remember, 135 * the normal copy_from_user() & copy_to_user() on the corresponding place in
329 * Guest physical == Launcher virtual. 136 * the memory region allocated by the Launcher.
330 * 137 *
331 * But we can't trust the Guest: it might be trying to access the Launcher 138 * But we can't trust the Guest: it might be trying to access the Launcher
332 * code. We have to check that the range is below the pfn_limit the Launcher 139 * code. We have to check that the range is below the pfn_limit the Launcher
@@ -338,148 +145,27 @@ int lguest_address_ok(const struct lguest *lg,
338 return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); 145 return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
339} 146}
340 147
341/* This is a convenient routine to get a 32-bit value from the Guest (a very 148/* This routine copies memory from the Guest. Here we can see how useful the
342 * common operation). Here we can see how useful the kill_lguest() routine we 149 * kill_lguest() routine we met in the Launcher can be: we return a random
343 * met in the Launcher can be: we return a random value (0) instead of needing 150 * value (all zeroes) instead of needing to return an error. */
344 * to return an error. */ 151void __lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
345u32 lgread_u32(struct lguest *lg, unsigned long addr)
346{
347 u32 val = 0;
348
349 /* Don't let them access the lguest binary. */
350 if (!lguest_address_ok(lg, addr, sizeof(val))
351 || get_user(val, (u32 __user *)addr) != 0)
352 kill_guest(lg, "bad read address %#lx", addr);
353 return val;
354}
355
356/* Same thing for writing a value. */
357void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
358{
359 if (!lguest_address_ok(lg, addr, sizeof(val))
360 || put_user(val, (u32 __user *)addr) != 0)
361 kill_guest(lg, "bad write address %#lx", addr);
362}
363
364/* This routine is more generic, and copies a range of Guest bytes into a
365 * buffer. If the copy_from_user() fails, we fill the buffer with zeroes, so
366 * the caller doesn't end up using uninitialized kernel memory. */
367void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
368{ 152{
369 if (!lguest_address_ok(lg, addr, bytes) 153 if (!lguest_address_ok(lg, addr, bytes)
370 || copy_from_user(b, (void __user *)addr, bytes) != 0) { 154 || copy_from_user(b, lg->mem_base + addr, bytes) != 0) {
371 /* copy_from_user should do this, but as we rely on it... */ 155 /* copy_from_user should do this, but as we rely on it... */
372 memset(b, 0, bytes); 156 memset(b, 0, bytes);
373 kill_guest(lg, "bad read address %#lx len %u", addr, bytes); 157 kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
374 } 158 }
375} 159}
376 160
377/* Similarly, our generic routine to copy into a range of Guest bytes. */ 161/* This is the write (copy into guest) version. */
378void lgwrite(struct lguest *lg, unsigned long addr, const void *b, 162void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
379 unsigned bytes) 163 unsigned bytes)
380{ 164{
381 if (!lguest_address_ok(lg, addr, bytes) 165 if (!lguest_address_ok(lg, addr, bytes)
382 || copy_to_user((void __user *)addr, b, bytes) != 0) 166 || copy_to_user(lg->mem_base + addr, b, bytes) != 0)
383 kill_guest(lg, "bad write address %#lx len %u", addr, bytes); 167 kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
384} 168}
385/* (end of memory access helper routines) :*/
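
The lguest_address_ok() test both helpers rely on is worth a close look: the second clause rejects addr+len wrapping around, which would otherwise defeat the pfn test. A small standalone sketch of the same check:

    /* Sketch of the range check: "(addr+len >= addr)" catches overflow. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static int address_ok(unsigned long addr, unsigned long len,
                          unsigned long pfn_limit)
    {
        return (addr + len) / PAGE_SIZE < pfn_limit && (addr + len >= addr);
    }

    int main(void)
    {
        unsigned long limit = 1024;  /* pretend the Guest has 4MB */

        printf("%d\n", address_ok(0x1000, 64, limit));    /* 1: fine     */
        printf("%d\n", address_ok(-64UL, 128, limit));    /* 0: wraps    */
        printf("%d\n", address_ok(5UL << 20, 16, limit)); /* 0: too high */
        return 0;
    }
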
386
387static void set_ts(void)
388{
389 u32 cr0;
390
391 cr0 = read_cr0();
392 if (!(cr0 & 8))
393 write_cr0(cr0|8);
394}
395
396/*S:010
397 * We are getting close to the Switcher.
398 *
399 * Remember that each CPU has two pages which are visible to the Guest when it
400 * runs on that CPU. This has to contain the state for that Guest: we copy the
401 * state in just before we run the Guest.
402 *
403 * Each Guest has "changed" flags which indicate what has changed in the Guest
404 * since it last ran. We saw this set in interrupts_and_traps.c and
405 * segments.c.
406 */
407static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
408{
409 /* Copying all this data can be quite expensive. We usually run the
410 * same Guest we ran last time (and that Guest hasn't run anywhere else
411 * meanwhile). If that's not the case, we pretend everything in the
412 * Guest has changed. */
413 if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
414 __get_cpu_var(last_guest) = lg;
415 lg->last_pages = pages;
416 lg->changed = CHANGED_ALL;
417 }
418
419 /* These copies are pretty cheap, so we do them unconditionally: */
420 /* Save the current Host top-level page directory. */
421 pages->state.host_cr3 = __pa(current->mm->pgd);
422 /* Set up the Guest's page tables to see this CPU's pages (and no
423 * other CPU's pages). */
424 map_switcher_in_guest(lg, pages);
425 /* Set up the two "TSS" members which tell the CPU what stack to use
426 * for traps which go directly into the Guest (ie. traps at privilege
427 * level 1). */
428 pages->state.guest_tss.esp1 = lg->esp1;
429 pages->state.guest_tss.ss1 = lg->ss1;
430
431 /* Copy direct-to-Guest trap entries. */
432 if (lg->changed & CHANGED_IDT)
433 copy_traps(lg, pages->state.guest_idt, default_idt_entries);
434
435 /* Copy all GDT entries which the Guest can change. */
436 if (lg->changed & CHANGED_GDT)
437 copy_gdt(lg, pages->state.guest_gdt);
438 /* If only the TLS entries have changed, copy them. */
439 else if (lg->changed & CHANGED_GDT_TLS)
440 copy_gdt_tls(lg, pages->state.guest_gdt);
441
442 /* Mark the Guest as unchanged for next time. */
443 lg->changed = 0;
444}
445
446/* Finally: the code to actually call into the Switcher to run the Guest. */
447static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
448{
449 /* This is a dummy value we need for GCC's sake. */
450 unsigned int clobber;
451
452 /* Copy the guest-specific information into this CPU's "struct
453 * lguest_pages". */
454 copy_in_guest_info(lg, pages);
455
456 /* Set the trap number to 256 (impossible value). If we fault while
457 * switching to the Guest (bad segment registers or bug), this will
458 * cause us to abort the Guest. */
459 lg->regs->trapnum = 256;
460
461 /* Now: we push the "eflags" register on the stack, then do an "lcall".
462 * This is how we change from using the kernel code segment to using
463 * the dedicated lguest code segment, as well as jumping into the
464 * Switcher.
465 *
466 * The lcall also pushes the old code segment (KERNEL_CS) onto the
467 * stack, then the address of this call. This stack layout happens to
468 * exactly match the stack of an interrupt... */
469 asm volatile("pushf; lcall *lguest_entry"
470 /* This is how we tell GCC that %eax ("a") and %ebx ("b")
471 * are changed by this routine. The "=" means output. */
472 : "=a"(clobber), "=b"(clobber)
473 /* %eax contains the pages pointer. ("0" refers to the
474 * 0-th argument above, ie "a"). %ebx contains the
475 * physical address of the Guest's top-level page
476 * directory. */
477 : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
478 /* We tell gcc that all these registers could change,
479 * which means we don't have to save and restore them in
480 * the Switcher. */
481 : "memory", "%edx", "%ecx", "%edi", "%esi");
482}
483/*:*/ 169/*:*/
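
The constraint notation in that asm ("=a" for an output, then "0" to route an input through the same register) is easy to misread. A tiny compile-and-run demo of the same matching-constraint idiom (x86 GCC assumed; unrelated to the Switcher itself):

    /* Minimal demo: "=a" makes %eax an output; "0" feeds an input
     * through the same location as operand 0, i.e. %eax. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int in = 20, out;

        asm("addl $1, %0"   /* modifies the register holding 'in'... */
            : "=a"(out)     /* ...which is %eax, declared as output  */
            : "0"(in));     /* "0" = same location as operand 0      */

        printf("%u\n", out);    /* prints 21 */
        return 0;
    }
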
484 170
485/*H:030 Let's jump straight to the main loop which runs the Guest. 171/*H:030 Let's jump straight to the main loop which runs the Guest.
@@ -489,22 +175,16 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
489{ 175{
490 /* We stop running once the Guest is dead. */ 176 /* We stop running once the Guest is dead. */
491 while (!lg->dead) { 177 while (!lg->dead) {
492 /* We need to initialize this, otherwise gcc complains. It's 178 /* First we run any hypercalls the Guest wants done. */
493 * not (yet) clever enough to see that it's initialized when we 179 if (lg->hcall)
494 * need it. */ 180 do_hypercalls(lg);
495 unsigned int cr2 = 0; /* Damn gcc */ 181
496 182 /* It's possible the Guest did a NOTIFY hypercall to the
497 /* First we run any hypercalls the Guest wants done: either in
498 * the hypercall ring in "struct lguest_data", or directly by
499 * using int 31 (LGUEST_TRAP_ENTRY). */
500 do_hypercalls(lg);
501 /* It's possible the Guest did a SEND_DMA hypercall to the
502 * Launcher, in which case we return from the read() now. */ 183 * Launcher, in which case we return from the read() now. */
503 if (lg->dma_is_pending) { 184 if (lg->pending_notify) {
504 if (put_user(lg->pending_dma, user) || 185 if (put_user(lg->pending_notify, user))
505 put_user(lg->pending_key, user+1))
506 return -EFAULT; 186 return -EFAULT;
507 return sizeof(unsigned long)*2; 187 return sizeof(lg->pending_notify);
508 } 188 }
509 189
510 /* Check for signals */ 190 /* Check for signals */
@@ -542,144 +222,20 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
542 * the "Do Not Disturb" sign: */ 222 * the "Do Not Disturb" sign: */
543 local_irq_disable(); 223 local_irq_disable();
544 224
545 /* Remember the awfully-named TS bit? If the Guest has asked 225 /* Actually run the Guest until something happens. */
546 * to set it we set it now, so we can trap and pass that trap 226 lguest_arch_run_guest(lg);
547 * to the Guest if it uses the FPU. */
548 if (lg->ts)
549 set_ts();
550
551 /* SYSENTER is an optimized way of doing system calls. We
552 * can't allow it because it always jumps to privilege level 0.
553 * A normal Guest won't try it because we don't advertise it in
554 * CPUID, but a malicious Guest (or malicious Guest userspace
555 * program) could, so we tell the CPU to disable it before
556 * running the Guest. */
557 if (boot_cpu_has(X86_FEATURE_SEP))
558 wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
559
560 /* Now we actually run the Guest. It will pop back out when
561 * something interesting happens, and we can examine its
562 * registers to see what it was doing. */
563 run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
564
565 /* The "regs" pointer contains two extra entries which are not
566 * really registers: a trap number which says what interrupt or
567 * trap made the switcher code come back, and an error code
568 * which some traps set. */
569
570 /* If the Guest page faulted, then the cr2 register will tell
571 * us the bad virtual address. We have to grab this now,
572 * because once we re-enable interrupts an interrupt could
573 * fault and thus overwrite cr2, or we could even move off to a
574 * different CPU. */
575 if (lg->regs->trapnum == 14)
576 cr2 = read_cr2();
577 /* Similarly, if we took a trap because the Guest used the FPU,
578 * we have to restore the FPU it expects to see. */
579 else if (lg->regs->trapnum == 7)
580 math_state_restore();
581
582 /* Restore SYSENTER if it's supposed to be on. */
583 if (boot_cpu_has(X86_FEATURE_SEP))
584 wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
585 227
586 /* Now we're ready to be interrupted or moved to other CPUs */ 228 /* Now we're ready to be interrupted or moved to other CPUs */
587 local_irq_enable(); 229 local_irq_enable();
588 230
589 /* OK, so what happened? */ 231 /* Now we deal with whatever happened to the Guest. */
590 switch (lg->regs->trapnum) { 232 lguest_arch_handle_trap(lg);
591 case 13: /* We've intercepted a GPF. */
592 /* Check if this was one of those annoying IN or OUT
593 * instructions which we need to emulate. If so, we
594 * just go back into the Guest after we've done it. */
595 if (lg->regs->errcode == 0) {
596 if (emulate_insn(lg))
597 continue;
598 }
599 break;
600 case 14: /* We've intercepted a page fault. */
601 /* The Guest accessed a virtual address that wasn't
602 * mapped. This happens a lot: we don't actually set
603 * up most of the page tables for the Guest at all when
604 * we start: as it runs it asks for more and more, and
605 * we set them up as required. In this case, we don't
606 * even tell the Guest that the fault happened.
607 *
608 * The errcode tells whether this was a read or a
609 * write, and whether kernel or userspace code. */
610 if (demand_page(lg, cr2, lg->regs->errcode))
611 continue;
612
613 /* OK, it's really not there (or not OK): the Guest
614 * needs to know. We write out the cr2 value so it
615 * knows where the fault occurred.
616 *
617 * Note that if the Guest were really messed up, this
618 * could happen before it's done the INITIALIZE
619 * hypercall, so lg->lguest_data will be NULL, so
620 * &lg->lguest_data->cr2 will be address 8. Writing
621 * into that address won't hurt the Host at all,
622 * though. */
623 if (put_user(cr2, &lg->lguest_data->cr2))
624 kill_guest(lg, "Writing cr2");
625 break;
626 case 7: /* We've intercepted a Device Not Available fault. */
627 /* If the Guest doesn't want to know, we already
628 * restored the Floating Point Unit, so we just
629 * continue without telling it. */
630 if (!lg->ts)
631 continue;
632 break;
633 case 32 ... 255:
634 /* These values mean a real interrupt occurred, in
635 * which case the Host handler has already been run.
636 * We just do a friendly check if another process
637 * should now be run, then fall through to loop
638 * around: */
639 cond_resched();
640 case LGUEST_TRAP_ENTRY: /* Handled at top of loop */
641 continue;
642 }
643
644 /* If we get here, it's a trap the Guest wants to know
645 * about. */
646 if (deliver_trap(lg, lg->regs->trapnum))
647 continue;
648
649 /* If the Guest doesn't have a handler (either it hasn't
650 * registered any yet, or it's one of the faults we don't let
651 * it handle), it dies with a cryptic error message. */
652 kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
653 lg->regs->trapnum, lg->regs->eip,
654 lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode);
655 } 233 }
234
656 /* The Guest is dead => "No such file or directory" */ 235 /* The Guest is dead => "No such file or directory" */
657 return -ENOENT; 236 return -ENOENT;
658} 237}
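
From the Launcher's point of view, run_guest() is a read() on /dev/lguest which returns a single unsigned long whenever the Guest does a NOTIFY. A hypothetical Launcher-side loop might look like this (handle_notify() is an assumed stand-in for virtio servicing, and the real example launcher does setup writes before reading):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static void handle_notify(unsigned long addr)   /* assumed helper */
    {
        printf("Guest notified us about address %#lx\n", addr);
    }

    int main(void)
    {
        int fd = open("/dev/lguest", O_RDWR);
        if (fd < 0) {
            perror("open /dev/lguest");
            return 1;
        }
        for (;;) {
            unsigned long notify_addr;
            if (read(fd, &notify_addr, sizeof(notify_addr))
                != (ssize_t)sizeof(notify_addr))
                break;          /* a signal, or the Guest died */
            handle_notify(notify_addr);
        }
        return 0;
    }
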
659 238
660/* Now we can look at each of the routines this calls, in increasing order of
661 * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
662 * deliver_trap() and demand_page(). After all those, we'll be ready to
663 * examine the Switcher, and our philosophical understanding of the Host/Guest
664 * duality will be complete. :*/
665
666int find_free_guest(void)
667{
668 unsigned int i;
669 for (i = 0; i < MAX_LGUEST_GUESTS; i++)
670 if (!lguests[i].tsk)
671 return i;
672 return -1;
673}
674
675static void adjust_pge(void *on)
676{
677 if (on)
678 write_cr4(read_cr4() | X86_CR4_PGE);
679 else
680 write_cr4(read_cr4() & ~X86_CR4_PGE);
681}
682
683/*H:000 239/*H:000
684 * Welcome to the Host! 240 * Welcome to the Host!
685 * 241 *
@@ -701,72 +257,50 @@ static int __init init(void)
701 /* First we put the Switcher up in very high virtual memory. */ 257 /* First we put the Switcher up in very high virtual memory. */
702 err = map_switcher(); 258 err = map_switcher();
703 if (err) 259 if (err)
704 return err; 260 goto out;
705 261
706 /* Now we set up the pagetable implementation for the Guests. */ 262 /* Now we set up the pagetable implementation for the Guests. */
707 err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES); 263 err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
708 if (err) { 264 if (err)
709 unmap_switcher(); 265 goto unmap;
710 return err;
711 }
712 266
713 /* The I/O subsystem needs some things initialized. */ 267 /* We might need to reserve an interrupt vector. */
714 lguest_io_init(); 268 err = init_interrupts();
269 if (err)
270 goto free_pgtables;
715 271
716 /* /dev/lguest needs to be registered. */ 272 /* /dev/lguest needs to be registered. */
717 err = lguest_device_init(); 273 err = lguest_device_init();
718 if (err) { 274 if (err)
719 free_pagetables(); 275 goto free_interrupts;
720 unmap_switcher();
721 return err;
722 }
723 276
724 /* Finally, we need to turn off "Page Global Enable". PGE is an 277 /* Finally we do some architecture-specific setup. */
725 * optimization where page table entries are specially marked to show 278 lguest_arch_host_init();
726 * they never change. The Host kernel marks all the kernel pages this
727 * way because it's always present, even when userspace is running.
728 *
729 * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
730 * switch to the Guest kernel. If you don't disable this on all CPUs,
731 * you'll get really weird bugs that you'll chase for two days.
732 *
733 * I used to turn PGE off every time we switched to the Guest and back
734 * on when we return, but that slowed the Switcher down noticeably. */
735
736 /* We don't need the complexity of CPUs coming and going while we're
737 * doing this. */
738 lock_cpu_hotplug();
739 if (cpu_has_pge) { /* We have a broader idea of "global". */
740 /* Remember that this was originally set (for cleanup). */
741 cpu_had_pge = 1;
742 /* adjust_pge is a helper function which sets or unsets the PGE
743 * bit on its CPU, depending on the argument (0 == unset). */
744 on_each_cpu(adjust_pge, (void *)0, 0, 1);
745 /* Turn off the feature in the global feature set. */
746 clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
747 }
748 unlock_cpu_hotplug();
749 279
750 /* All good! */ 280 /* All good! */
751 return 0; 281 return 0;
282
283free_interrupts:
284 free_interrupts();
285free_pgtables:
286 free_pagetables();
287unmap:
288 unmap_switcher();
289out:
290 return err;
752} 291}
753 292
754/* Cleaning up is just the same code, backwards. With a little French. */ 293/* Cleaning up is just the same code, backwards. With a little French. */
755static void __exit fini(void) 294static void __exit fini(void)
756{ 295{
757 lguest_device_remove(); 296 lguest_device_remove();
297 free_interrupts();
758 free_pagetables(); 298 free_pagetables();
759 unmap_switcher(); 299 unmap_switcher();
760 300
761 /* If we had PGE before we started, turn it back on now. */ 301 lguest_arch_host_fini();
762 lock_cpu_hotplug();
763 if (cpu_had_pge) {
764 set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
765 /* adjust_pge's argument "1" means set PGE. */
766 on_each_cpu(adjust_pge, (void *)1, 0, 1);
767 }
768 unlock_cpu_hotplug();
769} 302}
303/*:*/
770 304
771/* The Host side of lguest can be a module. This is a nice way for people to 305/* The Host side of lguest can be a module. This is a nice way for people to
772 * play with it. */ 306 * play with it. */
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index db6caace3b9c..9d5184c7c14a 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -25,17 +25,13 @@
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <asm/page.h> 26#include <asm/page.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <irq_vectors.h>
29#include "lg.h" 28#include "lg.h"
30 29
31/*H:120 This is the core hypercall routine: where the Guest gets what it 30/*H:120 This is the core hypercall routine: where the Guest gets what it wants.
32 * wants. Or gets killed. Or, in the case of LHCALL_CRASH, both. 31 * Or gets killed. Or, in the case of LHCALL_CRASH, both. */
33 * 32static void do_hcall(struct lguest *lg, struct hcall_args *args)
34 * Remember from the Guest: %eax == which call to make, and the arguments are
35 * packed into %edx, %ebx and %ecx if needed. */
36static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
37{ 33{
38 switch (regs->eax) { 34 switch (args->arg0) {
39 case LHCALL_FLUSH_ASYNC: 35 case LHCALL_FLUSH_ASYNC:
40 /* This call does nothing, except by breaking out of the Guest 36 /* This call does nothing, except by breaking out of the Guest
41 * it makes us process all the asynchronous hypercalls. */ 37 * it makes us process all the asynchronous hypercalls. */
@@ -51,7 +47,7 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
51 char msg[128]; 47 char msg[128];
52 /* If the lgread fails, it will call kill_guest() itself; the 48 /* If the lgread fails, it will call kill_guest() itself; the
53 * kill_guest() with the message will be ignored. */ 49 * kill_guest() with the message will be ignored. */
54 lgread(lg, msg, regs->edx, sizeof(msg)); 50 __lgread(lg, msg, args->arg1, sizeof(msg));
55 msg[sizeof(msg)-1] = '\0'; 51 msg[sizeof(msg)-1] = '\0';
56 kill_guest(lg, "CRASH: %s", msg); 52 kill_guest(lg, "CRASH: %s", msg);
57 break; 53 break;
@@ -59,67 +55,49 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
59 case LHCALL_FLUSH_TLB: 55 case LHCALL_FLUSH_TLB:
60 /* FLUSH_TLB comes in two flavors, depending on the 56 /* FLUSH_TLB comes in two flavors, depending on the
61 * argument: */ 57 * argument: */
62 if (regs->edx) 58 if (args->arg1)
63 guest_pagetable_clear_all(lg); 59 guest_pagetable_clear_all(lg);
64 else 60 else
65 guest_pagetable_flush_user(lg); 61 guest_pagetable_flush_user(lg);
66 break; 62 break;
67 case LHCALL_BIND_DMA:
68 /* BIND_DMA really wants four arguments, but it's the only call
69 * which does. So the Guest packs the number of buffers and
70 * the interrupt number into the final argument, and we decode
71 * it here. This can legitimately fail, since we currently
72 * place a limit on the number of DMA pools a Guest can have.
73 * So we return true or false from this call. */
74 regs->eax = bind_dma(lg, regs->edx, regs->ebx,
75 regs->ecx >> 8, regs->ecx & 0xFF);
76 break;
77 63
78 /* All these calls simply pass the arguments through to the right 64 /* All these calls simply pass the arguments through to the right
79 * routines. */ 65 * routines. */
80 case LHCALL_SEND_DMA:
81 send_dma(lg, regs->edx, regs->ebx);
82 break;
83 case LHCALL_LOAD_GDT:
84 load_guest_gdt(lg, regs->edx, regs->ebx);
85 break;
86 case LHCALL_LOAD_IDT_ENTRY:
87 load_guest_idt_entry(lg, regs->edx, regs->ebx, regs->ecx);
88 break;
89 case LHCALL_NEW_PGTABLE: 66 case LHCALL_NEW_PGTABLE:
90 guest_new_pagetable(lg, regs->edx); 67 guest_new_pagetable(lg, args->arg1);
91 break; 68 break;
92 case LHCALL_SET_STACK: 69 case LHCALL_SET_STACK:
93 guest_set_stack(lg, regs->edx, regs->ebx, regs->ecx); 70 guest_set_stack(lg, args->arg1, args->arg2, args->arg3);
94 break; 71 break;
95 case LHCALL_SET_PTE: 72 case LHCALL_SET_PTE:
96 guest_set_pte(lg, regs->edx, regs->ebx, mkgpte(regs->ecx)); 73 guest_set_pte(lg, args->arg1, args->arg2, __pte(args->arg3));
97 break; 74 break;
98 case LHCALL_SET_PMD: 75 case LHCALL_SET_PMD:
99 guest_set_pmd(lg, regs->edx, regs->ebx); 76 guest_set_pmd(lg, args->arg1, args->arg2);
100 break;
101 case LHCALL_LOAD_TLS:
102 guest_load_tls(lg, regs->edx);
103 break; 77 break;
104 case LHCALL_SET_CLOCKEVENT: 78 case LHCALL_SET_CLOCKEVENT:
105 guest_set_clockevent(lg, regs->edx); 79 guest_set_clockevent(lg, args->arg1);
106 break; 80 break;
107
108 case LHCALL_TS: 81 case LHCALL_TS:
109 /* This sets the TS flag, as we saw used in run_guest(). */ 82 /* This sets the TS flag, as we saw used in run_guest(). */
110 lg->ts = regs->edx; 83 lg->ts = args->arg1;
111 break; 84 break;
112 case LHCALL_HALT: 85 case LHCALL_HALT:
113 /* Similarly, this sets the halted flag for run_guest(). */ 86 /* Similarly, this sets the halted flag for run_guest(). */
114 lg->halted = 1; 87 lg->halted = 1;
115 break; 88 break;
89 case LHCALL_NOTIFY:
90 lg->pending_notify = args->arg1;
91 break;
116 default: 92 default:
117 kill_guest(lg, "Bad hypercall %li\n", regs->eax); 93 if (lguest_arch_do_hcall(lg, args))
94 kill_guest(lg, "Bad hypercall %li\n", args->arg0);
118 } 95 }
119} 96}
97/*:*/
120 98
121/* Asynchronous hypercalls are easy: we just look in the array in the Guest's 99/*H:124 Asynchronous hypercalls are easy: we just look in the array in the
122 * "struct lguest_data" and see if there are any new ones marked "ready". 100 * Guest's "struct lguest_data" to see if any new ones are marked "ready".
123 * 101 *
124 * We are careful to do these in order: obviously we respect the order the 102 * We are careful to do these in order: obviously we respect the order the
125 * Guest put them in the ring, but we also promise the Guest that they will 103 * Guest put them in the ring, but we also promise the Guest that they will
@@ -134,10 +112,9 @@ static void do_async_hcalls(struct lguest *lg)
134 if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st))) 112 if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
135 return; 113 return;
136 114
137
138 /* We process "struct lguest_data"'s hcalls[] ring once. */ 115 /* We process "struct lguest_data"'s hcalls[] ring once. */
139 for (i = 0; i < ARRAY_SIZE(st); i++) { 116 for (i = 0; i < ARRAY_SIZE(st); i++) {
140 struct lguest_regs regs; 117 struct hcall_args args;
141 /* We remember where we were up to from last time. This makes 118 /* We remember where we were up to from last time. This makes
142 * sure that the hypercalls are done in the order the Guest 119 * sure that the hypercalls are done in the order the Guest
143 * places them in the ring. */ 120 * places them in the ring. */
@@ -152,18 +129,16 @@ static void do_async_hcalls(struct lguest *lg)
152 if (++lg->next_hcall == LHCALL_RING_SIZE) 129 if (++lg->next_hcall == LHCALL_RING_SIZE)
153 lg->next_hcall = 0; 130 lg->next_hcall = 0;
154 131
155 /* We copy the hypercall arguments into a fake register 132 /* Copy the hypercall arguments into a local copy of
156 * structure. This makes life simple for do_hcall(). */ 133 * the hcall_args struct. */
157 if (get_user(regs.eax, &lg->lguest_data->hcalls[n].eax) 134 if (copy_from_user(&args, &lg->lguest_data->hcalls[n],
158 || get_user(regs.edx, &lg->lguest_data->hcalls[n].edx) 135 sizeof(struct hcall_args))) {
159 || get_user(regs.ecx, &lg->lguest_data->hcalls[n].ecx)
160 || get_user(regs.ebx, &lg->lguest_data->hcalls[n].ebx)) {
161 kill_guest(lg, "Fetching async hypercalls"); 136 kill_guest(lg, "Fetching async hypercalls");
162 break; 137 break;
163 } 138 }
164 139
165 /* Do the hypercall, same as a normal one. */ 140 /* Do the hypercall, same as a normal one. */
166 do_hcall(lg, &regs); 141 do_hcall(lg, &args);
167 142
168 /* Mark the hypercall done. */ 143 /* Mark the hypercall done. */
169 if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) { 144 if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
@@ -171,9 +146,9 @@ static void do_async_hcalls(struct lguest *lg)
171 break; 146 break;
172 } 147 }
173 148
174 /* Stop doing hypercalls if we've just done a DMA to the 149 /* Stop doing hypercalls if they want to notify the Launcher:
175 * Launcher: it needs to service this first. */ 150 * it needs to service this first. */
176 if (lg->dma_is_pending) 151 if (lg->pending_notify)
177 break; 152 break;
178 } 153 }
179} 154}
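
The ring discipline here is simple: one status byte per slot, 0xFF meaning "no call here (yet)", consumed strictly in order starting from next_hcall, wrapping at LHCALL_RING_SIZE, and stopping early if a notify becomes pending. A userspace sketch of the same discipline (ring size and status values chosen for illustration):

    /* Sketch: a status byte per slot, 0xFF = free, serviced in order. */
    #include <stdio.h>

    #define RING_SIZE 4

    static unsigned char status[RING_SIZE] = { 1, 1, 0xFF, 0xFF };
    static unsigned int next_hcall;

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++) {
            unsigned int n = next_hcall;

            if (status[n] == 0xFF)  /* nothing ready: stop */
                break;
            if (++next_hcall == RING_SIZE)
                next_hcall = 0;     /* wrap, like the code above */

            printf("servicing slot %u\n", n);
            status[n] = 0xFF;       /* mark done so the Guest can reuse it */
        }
        return 0;
    }
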
@@ -182,76 +157,35 @@ static void do_async_hcalls(struct lguest *lg)
182 * Guest makes a hypercall, we end up here to set things up: */ 157 * Guest makes a hypercall, we end up here to set things up: */
183static void initialize(struct lguest *lg) 158static void initialize(struct lguest *lg)
184{ 159{
185 u32 tsc_speed;
186 160
187 /* You can't do anything until you're initialized. The Guest knows the 161 /* You can't do anything until you're initialized. The Guest knows the
188 * rules, so we're unforgiving here. */ 162 * rules, so we're unforgiving here. */
189 if (lg->regs->eax != LHCALL_LGUEST_INIT) { 163 if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) {
190 kill_guest(lg, "hypercall %li before LGUEST_INIT", 164 kill_guest(lg, "hypercall %li before INIT", lg->hcall->arg0);
191 lg->regs->eax);
192 return; 165 return;
193 } 166 }
194 167
195 /* We insist that the Time Stamp Counter exist and doesn't change with 168 if (lguest_arch_init_hypercalls(lg))
196 * cpu frequency. Some devious chip manufacturers decided that TSC
197 * changes could be handled in software. I decided that time going
198 * backwards might be good for benchmarks, but it's bad for users.
199 *
200 * We also insist that the TSC be stable: the kernel detects unreliable
201 * TSCs for its own purposes, and we use that here. */
202 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
203 tsc_speed = tsc_khz;
204 else
205 tsc_speed = 0;
206
207 /* The pointer to the Guest's "struct lguest_data" is the only
208 * argument. */
209 lg->lguest_data = (struct lguest_data __user *)lg->regs->edx;
210 /* If we check the address they gave is OK now, we can simply
211 * copy_to_user/from_user from now on rather than using lgread/lgwrite.
212 * I put this in to show that I'm not immune to writing stupid
213 * optimizations. */
214 if (!lguest_address_ok(lg, lg->regs->edx, sizeof(*lg->lguest_data))) {
215 kill_guest(lg, "bad guest page %p", lg->lguest_data); 169 kill_guest(lg, "bad guest page %p", lg->lguest_data);
216 return; 170
217 }
218 /* The Guest tells us where we're not to deliver interrupts by putting 171 /* The Guest tells us where we're not to deliver interrupts by putting
219 * the range of addresses into "struct lguest_data". */ 172 * the range of addresses into "struct lguest_data". */
220 if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start) 173 if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
221 || get_user(lg->noirq_end, &lg->lguest_data->noirq_end) 174 || get_user(lg->noirq_end, &lg->lguest_data->noirq_end))
222 /* We tell the Guest that it can't use the top 4MB of virtual
223 * addresses used by the Switcher. */
224 || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
225 || put_user(tsc_speed, &lg->lguest_data->tsc_khz)
226 /* We also give the Guest a unique id, as used in lguest_net.c. */
227 || put_user(lg->guestid, &lg->lguest_data->guestid))
228 kill_guest(lg, "bad guest page %p", lg->lguest_data); 175 kill_guest(lg, "bad guest page %p", lg->lguest_data);
229 176
230 /* We write the current time into the Guest's data page once now. */ 177 /* We write the current time into the Guest's data page once now. */
231 write_timestamp(lg); 178 write_timestamp(lg);
232 179
180 /* page_tables.c will also do some setup. */
181 page_table_guest_data_init(lg);
182
233 /* This is the one case where the above accesses might have been the 183 /* This is the one case where the above accesses might have been the
234 * first write to a Guest page. This may have caused a copy-on-write 184 * first write to a Guest page. This may have caused a copy-on-write
235 * fault, but the Guest might be referring to the old (read-only) 185 * fault, but the Guest might be referring to the old (read-only)
236 * page. */ 186 * page. */
237 guest_pagetable_clear_all(lg); 187 guest_pagetable_clear_all(lg);
238} 188}
239/* Now we've examined the hypercall code; our Guest can make requests. There
240 * is one other way we can do things for the Guest, as we see in
241 * emulate_insn(). */
242
243/*H:110 Tricky point: we mark the hypercall as "done" once we've done it.
244 * Normally we don't need to do this: the Guest will run again and update the
245 * trap number before we come back around the run_guest() loop to
246 * do_hypercalls().
247 *
248 * However, if we are signalled or the Guest sends DMA to the Launcher, that
249 * loop will exit without running the Guest. When it comes back it would try
250 * to re-run the hypercall. */
251static void clear_hcall(struct lguest *lg)
252{
253 lg->regs->trapnum = 255;
254}
255 189
256/*H:100 190/*H:100
257 * Hypercalls 191 * Hypercalls
@@ -261,16 +195,12 @@ static void clear_hcall(struct lguest *lg)
261 */ 195 */
262void do_hypercalls(struct lguest *lg) 196void do_hypercalls(struct lguest *lg)
263{ 197{
264 /* Not initialized yet? */ 198 /* Not initialized yet? This hypercall must do it. */
265 if (unlikely(!lg->lguest_data)) { 199 if (unlikely(!lg->lguest_data)) {
266 /* Did the Guest make a hypercall? We might have come back for 200 /* Set up the "struct lguest_data" */
267 * some other reason (an interrupt, a different trap). */ 201 initialize(lg);
268 if (lg->regs->trapnum == LGUEST_TRAP_ENTRY) { 202 /* Hcall is done. */
269 /* Set up the "struct lguest_data" */ 203 lg->hcall = NULL;
270 initialize(lg);
271 /* The hypercall is done. */
272 clear_hcall(lg);
273 }
274 return; 204 return;
275 } 205 }
276 206
@@ -280,12 +210,21 @@ void do_hypercalls(struct lguest *lg)
280 do_async_hcalls(lg); 210 do_async_hcalls(lg);
281 211
282 /* If we stopped reading the hypercall ring because the Guest did a 212 /* If we stopped reading the hypercall ring because the Guest did a
283 * SEND_DMA to the Launcher, we want to return now. Otherwise if the 213 * NOTIFY to the Launcher, we want to return now. Otherwise we do
284 * Guest asked us to do a hypercall, we do it. */ 214 * the hypercall. */
285 if (!lg->dma_is_pending && lg->regs->trapnum == LGUEST_TRAP_ENTRY) { 215 if (!lg->pending_notify) {
286 do_hcall(lg, lg->regs); 216 do_hcall(lg, lg->hcall);
287 /* The hypercall is done. */ 217 /* Tricky point: we reset the hcall pointer to mark the
288 clear_hcall(lg); 218 * hypercall as "done". We use the hcall pointer rather than
219 * the trap number to indicate a hypercall is pending.
220 * Normally it doesn't matter: the Guest will run again and
221 * update the trap number before we come back here.
222 *
223 * However, if we are signalled or the Guest sends DMA to the
224 * Launcher, the run_guest() loop will exit without running the
225 * Guest. When it comes back it would try to re-run the
226 * hypercall. */
227 lg->hcall = NULL;
289 } 228 }
290} 229}
291 230
@@ -295,6 +234,6 @@ void write_timestamp(struct lguest *lg)
295{ 234{
296 struct timespec now; 235 struct timespec now;
297 ktime_get_real_ts(&now); 236 ktime_get_real_ts(&now);
298 if (put_user(now, &lg->lguest_data->time)) 237 if (copy_to_user(&lg->lguest_data->time, &now, sizeof(struct timespec)))
299 kill_guest(lg, "Writing timestamp"); 238 kill_guest(lg, "Writing timestamp");
300} 239}
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 39731232d827..82966982cb38 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -12,8 +12,14 @@
12 * them first, so we also have a way of "reflecting" them into the Guest as if 12 * them first, so we also have a way of "reflecting" them into the Guest as if
13 * they had been delivered to it directly. :*/ 13 * they had been delivered to it directly. :*/
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/interrupt.h>
16#include <linux/module.h>
15#include "lg.h" 17#include "lg.h"
16 18
19/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
20static unsigned int syscall_vector = SYSCALL_VECTOR;
21module_param(syscall_vector, uint, 0444);
22
17/* The address of the interrupt handler is split into two bits: */ 23/* The address of the interrupt handler is split into two bits: */
18static unsigned long idt_address(u32 lo, u32 hi) 24static unsigned long idt_address(u32 lo, u32 hi)
19{ 25{
@@ -39,7 +45,7 @@ static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
39{ 45{
40 /* Stack grows downwards: move the stack pointer down, then write the value. */ 46 /* Stack grows downwards: move the stack pointer down, then write the value. */
41 *gstack -= 4; 47 *gstack -= 4;
42 lgwrite_u32(lg, *gstack, val); 48 lgwrite(lg, *gstack, u32, val);
43} 49}
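
push_guest_stack() is the classic x86 push done by hand: decrement the stack pointer, then store. A standalone sketch of the pattern:

    /* Sketch of a hand-rolled push onto a downward-growing stack. */
    #include <stdio.h>
    #include <stdint.h>

    static void push(uint32_t *stack, unsigned long *sp, uint32_t val)
    {
        *sp -= 4;                   /* move the stack pointer down */
        stack[*sp / 4] = val;       /* then write the value there  */
    }

    int main(void)
    {
        uint32_t stack[8] = { 0 };
        unsigned long sp = sizeof(stack);   /* start at the top */

        push(stack, &sp, 0xDEADBEEF);
        push(stack, &sp, 0xCAFEF00D);
        printf("sp=%lu top=%#x\n", sp, stack[sp / 4]);
        return 0;
    }
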
44 50
45/*H:210 The set_guest_interrupt() routine actually delivers the interrupt or 51/*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
@@ -56,8 +62,9 @@ static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
56 * it). */ 62 * it). */
57static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err) 63static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
58{ 64{
59 unsigned long gstack; 65 unsigned long gstack, origstack;
60 u32 eflags, ss, irq_enable; 66 u32 eflags, ss, irq_enable;
67 unsigned long virtstack;
61 68
62 /* There are two cases for interrupts: one where the Guest is already 69 /* There are two cases for interrupts: one where the Guest is already
63 * in the kernel, and a more complex one where the Guest is in 70 * in the kernel, and a more complex one where the Guest is in
@@ -65,8 +72,10 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
65 if ((lg->regs->ss&0x3) != GUEST_PL) { 72 if ((lg->regs->ss&0x3) != GUEST_PL) {
66 /* The Guest told us their kernel stack with the SET_STACK 73 /* The Guest told us their kernel stack with the SET_STACK
67 * hypercall: both the virtual address and the segment */ 74 * hypercall: both the virtual address and the segment */
68 gstack = guest_pa(lg, lg->esp1); 75 virtstack = lg->esp1;
69 ss = lg->ss1; 76 ss = lg->ss1;
77
78 origstack = gstack = guest_pa(lg, virtstack);
70 /* We push the old stack segment and pointer onto the new 79 /* We push the old stack segment and pointer onto the new
71 * stack: when the Guest does an "iret" back from the interrupt 80 * stack: when the Guest does an "iret" back from the interrupt
72 * handler the CPU will notice they're dropping privilege 81 * handler the CPU will notice they're dropping privilege
@@ -75,8 +84,10 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
75 push_guest_stack(lg, &gstack, lg->regs->esp); 84 push_guest_stack(lg, &gstack, lg->regs->esp);
76 } else { 85 } else {
77 /* We're staying on the same Guest (kernel) stack. */ 86 /* We're staying on the same Guest (kernel) stack. */
78 gstack = guest_pa(lg, lg->regs->esp); 87 virtstack = lg->regs->esp;
79 ss = lg->regs->ss; 88 ss = lg->regs->ss;
89
90 origstack = gstack = guest_pa(lg, virtstack);
80 } 91 }
81 92
82 /* Remember that we never let the Guest actually disable interrupts, so 93 /* Remember that we never let the Guest actually disable interrupts, so
@@ -102,7 +113,7 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
102 /* Now we've pushed all the old state, we change the stack, the code 113 /* Now we've pushed all the old state, we change the stack, the code
103 * segment and the address to execute. */ 114 * segment and the address to execute. */
104 lg->regs->ss = ss; 115 lg->regs->ss = ss;
105 lg->regs->esp = gstack + lg->page_offset; 116 lg->regs->esp = virtstack + (gstack - origstack);
106 lg->regs->cs = (__KERNEL_CS|GUEST_PL); 117 lg->regs->cs = (__KERNEL_CS|GUEST_PL);
107 lg->regs->eip = idt_address(lo, hi); 118 lg->regs->eip = idt_address(lo, hi);
108 119
@@ -165,7 +176,7 @@ void maybe_do_interrupt(struct lguest *lg)
165 /* Look at the IDT entry the Guest gave us for this interrupt. The 176 /* Look at the IDT entry the Guest gave us for this interrupt. The
166 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip 177 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
167 * over them. */ 178 * over them. */
168 idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq]; 179 idt = &lg->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
169 /* If they don't have a handler (yet?), we just ignore it */ 180 /* If they don't have a handler (yet?), we just ignore it */
170 if (idt_present(idt->a, idt->b)) { 181 if (idt_present(idt->a, idt->b)) {
171 /* OK, mark it no longer pending and deliver it. */ 182 /* OK, mark it no longer pending and deliver it. */
@@ -183,6 +194,47 @@ void maybe_do_interrupt(struct lguest *lg)
183 * timer interrupt. */ 194 * timer interrupt. */
184 write_timestamp(lg); 195 write_timestamp(lg);
185} 196}
197/*:*/
198
199/* Linux uses trap 128 for system calls. Plan 9 uses 64, and Ron Minnich sent
200 * me a patch, so we support that too. It'd be a big step for lguest if half
201 * the Plan 9 user base were to start using it.
202 *
203 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
204 * userbase. Oh well. */
205static bool could_be_syscall(unsigned int num)
206{
207 /* Normal Linux SYSCALL_VECTOR or reserved vector? */
208 return num == SYSCALL_VECTOR || num == syscall_vector;
209}
210
211/* The syscall vector it wants must be unused by Host. */
212bool check_syscall_vector(struct lguest *lg)
213{
214 u32 vector;
215
216 if (get_user(vector, &lg->lguest_data->syscall_vec))
217 return false;
218
219 return could_be_syscall(vector);
220}
221
222int init_interrupts(void)
223{
224 /* If they want some strange system call vector, reserve it now */
225 if (syscall_vector != SYSCALL_VECTOR
226 && test_and_set_bit(syscall_vector, used_vectors)) {
227 printk("lg: couldn't reserve syscall %u\n", syscall_vector);
228 return -EBUSY;
229 }
230 return 0;
231}
232
233void free_interrupts(void)
234{
235 if (syscall_vector != SYSCALL_VECTOR)
236 clear_bit(syscall_vector, used_vectors);
237}
186 238
187/*H:220 Now we've got the routines to deliver interrupts, delivering traps 239/*H:220 Now we've got the routines to deliver interrupts, delivering traps
188 * like page fault is easy. The only trick is that Intel decided that some 240 * like page fault is easy. The only trick is that Intel decided that some
@@ -197,14 +249,14 @@ int deliver_trap(struct lguest *lg, unsigned int num)
197{ 249{
198 /* Trap numbers are always 8 bit, but we set an impossible trap number 250 /* Trap numbers are always 8 bit, but we set an impossible trap number
199 * for traps inside the Switcher, so check that here. */ 251 * for traps inside the Switcher, so check that here. */
200 if (num >= ARRAY_SIZE(lg->idt)) 252 if (num >= ARRAY_SIZE(lg->arch.idt))
201 return 0; 253 return 0;
202 254
203 /* Early on the Guest hasn't set the IDT entries (or maybe it put a 255 /* Early on the Guest hasn't set the IDT entries (or maybe it put a
204 * bogus one in): if we fail here, the Guest will be killed. */ 256 * bogus one in): if we fail here, the Guest will be killed. */
205 if (!idt_present(lg->idt[num].a, lg->idt[num].b)) 257 if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b))
206 return 0; 258 return 0;
207 set_guest_interrupt(lg, lg->idt[num].a, lg->idt[num].b, has_err(num)); 259 set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, has_err(num));
208 return 1; 260 return 1;
209} 261}
210 262
@@ -218,28 +270,20 @@ int deliver_trap(struct lguest *lg, unsigned int num)
218 * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all 270 * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all
219 * the other hypervisors would tease it. 271 * the other hypervisors would tease it.
220 * 272 *
221 * This routine determines if a trap can be delivered directly. */ 273 * This routine indicates if a particular trap number could be delivered
222static int direct_trap(const struct lguest *lg, 274 * directly. */
223 const struct desc_struct *trap, 275static int direct_trap(unsigned int num)
224 unsigned int num)
225{ 276{
226 /* Hardware interrupts don't go to the Guest at all (except system 277 /* Hardware interrupts don't go to the Guest at all (except system
227 * call). */ 278 * call). */
228 if (num >= FIRST_EXTERNAL_VECTOR && num != SYSCALL_VECTOR) 279 if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
229 return 0; 280 return 0;
230 281
231 /* The Host needs to see page faults (for shadow paging and to save the 282 /* The Host needs to see page faults (for shadow paging and to save the
232 * fault address), general protection faults (in/out emulation) and 283 * fault address), general protection faults (in/out emulation) and
233 * device not available (TS handling), and of course, the hypercall 284 * device not available (TS handling), and of course, the hypercall
234 * trap. */ 285 * trap. */
235 if (num == 14 || num == 13 || num == 7 || num == LGUEST_TRAP_ENTRY) 286 return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
236 return 0;
237
238 /* Only trap gates (type 15) can go direct to the Guest. Interrupt
239 * gates (type 14) disable interrupts as they are entered, which we
240 * never let the Guest do. Not present entries (type 0x0) also can't
241 * go direct, of course 8) */
242 return idt_type(trap->a, trap->b) == 0xF;
243} 287}
244/*:*/ 288/*:*/
245 289
@@ -348,15 +392,11 @@ void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
348 * to copy this again. */ 392 * to copy this again. */
349 lg->changed |= CHANGED_IDT; 393 lg->changed |= CHANGED_IDT;
350 394
351 /* The IDT which we keep in "struct lguest" only contains 32 entries 395 /* Check that the Guest doesn't try to step outside the bounds. */
352 * for the traps and LGUEST_IRQS (32) entries for interrupts. We 396 if (num >= ARRAY_SIZE(lg->arch.idt))
353 * ignore attempts to set handlers for higher interrupt numbers, except 397 kill_guest(lg, "Setting idt entry %u", num);
354 * for the system call "interrupt" at 128: we have a special IDT entry 398 else
355 * for that. */ 399 set_trap(lg, &lg->arch.idt[num], num, lo, hi);
356 if (num < ARRAY_SIZE(lg->idt))
357 set_trap(lg, &lg->idt[num], num, lo, hi);
358 else if (num == SYSCALL_VECTOR)
359 set_trap(lg, &lg->syscall_idt, num, lo, hi);
360} 400}
361 401
362/* The default entry for each interrupt points into the Switcher routines which 402/* The default entry for each interrupt points into the Switcher routines which
@@ -399,20 +439,21 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
399 439
400 /* We can simply copy the direct traps, otherwise we use the default 440 /* We can simply copy the direct traps, otherwise we use the default
401 * ones in the Switcher: they will return to the Host. */ 441 * ones in the Switcher: they will return to the Host. */
402 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) { 442 for (i = 0; i < ARRAY_SIZE(lg->arch.idt); i++) {
403 if (direct_trap(lg, &lg->idt[i], i)) 443 /* If no Guest can ever override this trap, leave it alone. */
404 idt[i] = lg->idt[i]; 444 if (!direct_trap(i))
445 continue;
446
447 /* Only trap gates (type 15) can go direct to the Guest.
448 * Interrupt gates (type 14) disable interrupts as they are
449 * entered, which we never let the Guest do. Not present
450 * entries (type 0x0) also can't go direct, of course. */
451 if (idt_type(lg->arch.idt[i].a, lg->arch.idt[i].b) == 0xF)
452 idt[i] = lg->arch.idt[i];
405 else 453 else
454 /* Reset it to the default. */
406 default_idt_entry(&idt[i], i, def[i]); 455 default_idt_entry(&idt[i], i, def[i]);
407 } 456 }
408
409 /* Don't forget the system call trap! The IDT entries for other
410 * interrupts never change, so no need to copy them. */
411 i = SYSCALL_VECTOR;
412 if (direct_trap(lg, &lg->syscall_idt, i))
413 idt[i] = lg->syscall_idt;
414 else
415 default_idt_entry(&idt[i], i, def[i]);
416} 457}
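
The type-15 test is the heart of copy_traps(). Assuming the usual x86 IDT layout (the gate type lives in bits 8-11 of the descriptor's high word, which is how idt_type() reads it), a small sketch of the filter:

    /* Sketch of the gate-type filter: only trap gates (type 15) may be
     * copied straight into the Guest's IDT. */
    #include <stdio.h>
    #include <stdint.h>

    static unsigned idt_type(uint32_t lo, uint32_t hi)
    {
        (void)lo;                   /* the type lives in the high word */
        return (hi >> 8) & 0xF;
    }

    int main(void)
    {
        uint32_t trap_gate = 0x8F00;    /* present, type 15: direct OK    */
        uint32_t intr_gate = 0x8E00;    /* present, type 14: never direct */

        printf("trap gate -> %s\n",
               idt_type(0, trap_gate) == 0xF ? "copy to Guest" : "default");
        printf("intr gate -> %s\n",
               idt_type(0, intr_gate) == 0xF ? "copy to Guest" : "default");
        return 0;
    }
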
417 458
418void guest_set_clockevent(struct lguest *lg, unsigned long delta) 459void guest_set_clockevent(struct lguest *lg, unsigned long delta)
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c
deleted file mode 100644
index ea68613b43f6..000000000000
--- a/drivers/lguest/io.c
+++ /dev/null
@@ -1,626 +0,0 @@
1/*P:300 The I/O mechanism in lguest is simple yet flexible, allowing the Guest
2 * to talk to the Launcher or directly to another Guest. It uses familiar
3 * concepts of DMA and interrupts, plus some neat code stolen from
4 * futexes... :*/
5
6/* Copyright (C) 2006 Rusty Russell IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22#include <linux/types.h>
23#include <linux/futex.h>
24#include <linux/jhash.h>
25#include <linux/mm.h>
26#include <linux/highmem.h>
27#include <linux/uaccess.h>
28#include "lg.h"
29
30/*L:300
31 * I/O
32 *
33 * Getting data in and out of the Guest is quite an art. There are numerous
34 * ways to do it, and they all suck differently. We try to keep things fairly
35 * close to "real" hardware so our Guest's drivers don't look like an alien
36 * visitation in the middle of the Linux code, and yet make sure that Guests
37 * can talk directly to other Guests, not just the Launcher.
38 *
39 * To do this, the Guest gives us a key when it binds or sends DMA buffers.
40 * The key corresponds to a "physical" address inside the Guest (ie. a virtual
41 * address inside the Launcher process). We don't, however, use this key
42 * directly.
43 *
44 * We want Guests which share memory to be able to DMA to each other: two
45 * Launchers can mmap the same file, then the Guests can communicate.
46 * Fortunately, the futex code provides us with a way to get a "union
47 * futex_key" corresponding to the memory lying at a virtual address: if the
48 * two processes share memory, the "union futex_key" for that memory will match
49 * even if the memory is mapped at different addresses in each. So we always
50 * convert the keys to "union futex_key"s to compare them.
51 *
52 * Before we dive into this though, we need to look at another set of helper
53 * routines used throughout the Host kernel code to access Guest memory.
54 :*/
55static struct list_head dma_hash[61];
56
57/* An unfortunate side effect of the Linux doubly-linked list implementation is
58 * that there's no good way to statically initialize an array of linked
59 * lists. */
60void lguest_io_init(void)
61{
62 unsigned int i;
63
64 for (i = 0; i < ARRAY_SIZE(dma_hash); i++)
65 INIT_LIST_HEAD(&dma_hash[i]);
66}
67
68/* FIXME: allow multi-page lengths. */
69static int check_dma_list(struct lguest *lg, const struct lguest_dma *dma)
70{
71 unsigned int i;
72
73 for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
74 if (!dma->len[i])
75 return 1;
76 if (!lguest_address_ok(lg, dma->addr[i], dma->len[i]))
77 goto kill;
78 if (dma->len[i] > PAGE_SIZE)
79 goto kill;
80 /* We could do over a page, but is it worth it? */
81 if ((dma->addr[i] % PAGE_SIZE) + dma->len[i] > PAGE_SIZE)
82 goto kill;
83 }
84 return 1;
85
86kill:
87 kill_guest(lg, "bad DMA entry: %u@%#lx", dma->len[i], dma->addr[i]);
88 return 0;
89}
90
91/*L:330 This is our hash function, using the wonderful Jenkins hash.
92 *
93 * The futex key is a union with three parts: an unsigned long word, a pointer,
94 * and an int "offset". We could use jhash_2words() which takes three u32s.
95 * (Ok, the hash functions are great: the naming sucks though).
96 *
97 * It's nice to be portable to 64-bit platforms, so we use the more generic
98 * jhash2(), which takes an array of u32, the number of u32s, and an initial
99 * u32 to roll in. This is uglier, but breaks down to almost the same code on
100 * 32-bit platforms like this one.
101 *
102 * We want a position in the array, so we modulo ARRAY_SIZE(dma_hash) (ie. 61).
103 */
104static unsigned int hash(const union futex_key *key)
105{
106 return jhash2((u32*)&key->both.word,
107 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
108 key->both.offset)
109 % ARRAY_SIZE(dma_hash);
110}
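
If jhash2() looks opaque, the shape of the operation is simply "mix the key words with a seed, then reduce modulo a prime table size". A toy stand-in (explicitly not jhash2) showing that shape:

    /* Sketch of bucketing a key: mix, then modulo a prime table size.
     * The mix here is a simple stand-in, not the Jenkins hash. */
    #include <stdio.h>
    #include <stdint.h>

    #define HASH_BUCKETS 61     /* prime, like ARRAY_SIZE(dma_hash) */

    static unsigned int toy_mix(const uint32_t *k, unsigned n, uint32_t seed)
    {
        uint32_t h = seed;
        while (n--)
            h = (h ^ *k++) * 2654435761u;  /* Knuth multiplicative step */
        return h;
    }

    int main(void)
    {
        uint32_t key[3] = { 0x1000, 0xdead, 42 };  /* word, ptr, offset */

        printf("bucket = %u\n", toy_mix(key, 3, key[2]) % HASH_BUCKETS);
        return 0;
    }
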
111
112/* This is a convenience routine to compare two keys. It's a much bemoaned C
113 * weakness that it doesn't allow '==' on structures or unions, so we have to
114 * open-code it like this. */
115static inline int key_eq(const union futex_key *a, const union futex_key *b)
116{
117 return (a->both.word == b->both.word
118 && a->both.ptr == b->both.ptr
119 && a->both.offset == b->both.offset);
120}
121
122/*L:360 OK, when we need to actually free up a Guest's DMA array we do several
123 * things, so we have a convenient function to do it.
124 *
125 * The caller must hold a read lock on dmainfo owner's current->mm->mmap_sem
126 * for the drop_futex_key_refs(). */
127static void unlink_dma(struct lguest_dma_info *dmainfo)
128{
129 /* You locked this too, right? */
130 BUG_ON(!mutex_is_locked(&lguest_lock));
131 /* This is how we know that the entry is free. */
132 dmainfo->interrupt = 0;
133 /* Remove it from the hash table. */
134 list_del(&dmainfo->list);
135 /* Drop the references we were holding (to the inode or mm). */
136 drop_futex_key_refs(&dmainfo->key);
137}
138
139/*L:350 This is the routine which we call when the Guest asks to unregister a
140 * DMA array attached to a given key. Returns true if the array was found. */
141static int unbind_dma(struct lguest *lg,
142 const union futex_key *key,
143 unsigned long dmas)
144{
145 int i, ret = 0;
146
147 /* We don't bother with the hash table, just look through all this
148 * Guest's DMA arrays. */
149 for (i = 0; i < LGUEST_MAX_DMA; i++) {
150 /* In theory it could have more than one array on the same key,
151 * or one array on multiple keys, so we check both */
152 if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) {
153 unlink_dma(&lg->dma[i]);
154 ret = 1;
155 break;
156 }
157 }
158 return ret;
159}
160
161/*L:340 BIND_DMA: this is the hypercall which sets up an array of "struct
162 * lguest_dma" for receiving I/O.
163 *
164 * The Guest wants to bind an array of "struct lguest_dma"s to a particular key
165 * to receive input. This only happens when the Guest is setting up a new
166 * device, so it doesn't have to be very fast.
167 *
168 * It returns 1 on a successful registration (it can fail if we hit the limit
169 * of registrations for this Guest).
170 */
171int bind_dma(struct lguest *lg,
172 unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt)
173{
174 unsigned int i;
175 int ret = 0;
176 union futex_key key;
177 /* Futex code needs the mmap_sem. */
178 struct rw_semaphore *fshared = &current->mm->mmap_sem;
179
180 /* Invalid interrupt? (We could kill the guest here). */
181 if (interrupt >= LGUEST_IRQS)
182 return 0;
183
184 /* We need to grab the Big Lguest Lock, because other Guests may be
185 * trying to look through this Guest's DMAs to send something while
186 * we're doing this. */
187 mutex_lock(&lguest_lock);
188 down_read(fshared);
189 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
190 kill_guest(lg, "bad dma key %#lx", ukey);
191 goto unlock;
192 }
193
194 /* We want to keep this key valid once we drop mmap_sem, so we have to
195 * hold a reference. */
196 get_futex_key_refs(&key);
197
198 /* If the Guest specified an interrupt of 0, that means they want to
199 * unregister this array of "struct lguest_dma"s. */
200 if (interrupt == 0)
201 ret = unbind_dma(lg, &key, dmas);
202 else {
203 /* Look through this Guest's dma array for an unused entry. */
204 for (i = 0; i < LGUEST_MAX_DMA; i++) {
205 /* If the interrupt is non-zero, the entry is already
206 * used. */
207 if (lg->dma[i].interrupt)
208 continue;
209
210 /* OK, a free one! Fill in our details. */
211 lg->dma[i].dmas = dmas;
212 lg->dma[i].num_dmas = numdmas;
213 lg->dma[i].next_dma = 0;
214 lg->dma[i].key = key;
215 lg->dma[i].guestid = lg->guestid;
216 lg->dma[i].interrupt = interrupt;
217
218 /* Now we add it to the hash table: the position
219 * depends on the futex key that we got. */
220 list_add(&lg->dma[i].list, &dma_hash[hash(&key)]);
221 /* Success! */
222 ret = 1;
223 goto unlock;
224 }
225 }
226 /* If we didn't find a slot to put the key in, drop the reference
227 * again. */
228 drop_futex_key_refs(&key);
229unlock:
230 /* Unlock and out. */
231 up_read(fshared);
232 mutex_unlock(&lguest_lock);
233 return ret;
234}
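/* To make the calling convention concrete, here is a sketch of the Guest's
 * side, using the wrappers from lguest.c (the names "key" and "rx" are
 * invented purely for illustration):
 *
 *	static struct lguest_dma rx[16];
 *
 *	if (lguest_bind_dma(key, rx, 16, 9))	// irq 9 when a buffer fills
 *		return -ENOMEM;
 *	...
 *	lguest_unbind_dma(key, rx);	// same hypercall, interrupt == 0
 */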
235
236/*L:385 Note that our routines to access a different Guest's memory are called
237 * lgread_other() and lgwrite_other(): these names emphasize that they are only
238 * used when the Guest is *not* the current Guest.
239 *
240 * The interface for copying from another process's memory is called
241 * access_process_vm(), with a final argument of 0 for a read, and 1 for a
242 * write.
243 *
244 * We need lgread_other() to read the destination Guest's "struct lguest_dma"
245 * array. */
246static int lgread_other(struct lguest *lg,
247 void *buf, u32 addr, unsigned bytes)
248{
249 if (!lguest_address_ok(lg, addr, bytes)
250 || access_process_vm(lg->tsk, addr, buf, bytes, 0) != bytes) {
251 memset(buf, 0, bytes);
252 kill_guest(lg, "bad address in registered DMA struct");
253 return 0;
254 }
255 return 1;
256}
257
258/* "lgwrite()" to another Guest: used to update the destination "used_len" once
259 * we've transferred data into the buffer. */
260static int lgwrite_other(struct lguest *lg, u32 addr,
261 const void *buf, unsigned bytes)
262{
263 if (!lguest_address_ok(lg, addr, bytes)
264 || (access_process_vm(lg->tsk, addr, (void *)buf, bytes, 1)
265 != bytes)) {
266 kill_guest(lg, "bad address writing to registered DMA");
267 return 0;
268 }
269 return 1;
270}
271
272/*L:400 This is the generic engine which copies a source "struct
273 * lguest_dma" from this Guest into another Guest's "struct lguest_dma". The
274 * destination Guest's pages have already been mapped, as contained in the
275 * pages array.
276 *
277 * If you're wondering if there's a nice "copy from one process to another"
278 * routine, so was I. But Linux isn't really set up to copy between two
279 * unrelated processes, so we have to write it ourselves.
280 */
281static u32 copy_data(struct lguest *srclg,
282 const struct lguest_dma *src,
283 const struct lguest_dma *dst,
284 struct page *pages[])
285{
286 unsigned int totlen, si, di, srcoff, dstoff;
287 void *maddr = NULL;
288
289 /* We return the total length transferred. */
290 totlen = 0;
291
292 /* We keep indexes into the source and destination "struct lguest_dma",
293 * and an offset within each region. */
294 si = di = 0;
295 srcoff = dstoff = 0;
296
297 /* We loop until the source or destination is exhausted. */
298 while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si]
299 && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) {
300 /* We can only transfer the rest of the src buffer, or as much
301 * as will fit into the destination buffer. */
302 u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff);
303
304 /* For systems using "highmem" we need to use kmap() to access
305 * the page we want. We often use the same page over and over,
306 * so rather than kmap() it on every loop, we set the maddr
307 * pointer to NULL when we need to move to the next
308 * destination page. */
309 if (!maddr)
310 maddr = kmap(pages[di]);
311
312 /* Copy directly from (this Guest's) source address to the
313 * destination Guest's kmap()ed buffer. Note that maddr points
314 * to the start of the page: we need to add the offset of the
315 * destination address and offset within the buffer. */
316
317 /* FIXME: This is not completely portable. I looked at
318 * copy_to_user_page(), and some arch's seem to need special
319 * flushes. x86 is fine. */
320 if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE,
321 (void __user *)src->addr[si], len) != 0) {
322 /* If a copy failed, it's the source's fault. */
323 kill_guest(srclg, "bad address in sending DMA");
324 totlen = 0;
325 break;
326 }
327
328 /* Increment the total and src & dst offsets */
329 totlen += len;
330 srcoff += len;
331 dstoff += len;
332
333 /* Presumably we reached the end of the src or dest buffers: */
334 if (srcoff == src->len[si]) {
335 /* Move to the next buffer at offset 0 */
336 si++;
337 srcoff = 0;
338 }
339 if (dstoff == dst->len[di]) {
340 /* We need to unmap that destination page and reset
341 * maddr ready for the next one. */
342 kunmap(pages[di]);
343 maddr = NULL;
344 di++;
345 dstoff = 0;
346 }
347 }
348
349 /* If we still had a page mapped at the end, unmap now. */
350 if (maddr)
351 kunmap(pages[di]);
352
353 return totlen;
354}
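/* (A worked aside on the offset arithmetic above: had dst->addr[di] been
 * 0x12345 and dstoff 0x20, the bytes would land (0x12345 + 0x20) % 0x1000
 * == 0x365 into the kmap()ed page: exactly where that destination buffer
 * continues within the page.) */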
355
356/*L:390 This is how we transfer a "struct lguest_dma" from the source Guest
357 * (the current Guest which called SEND_DMA) to another Guest. */
358static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
359 struct lguest *dstlg, const struct lguest_dma *dst)
360{
361 int i;
362 u32 ret;
363 struct page *pages[LGUEST_MAX_DMA_SECTIONS];
364
365 /* We check that both source and destination "struct lguest_dma"s are
366 * within the bounds of the source and destination Guests */
367 if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src))
368 return 0;
369
370 /* We need to map the pages which correspond to each part of the
371 * destination buffer. */
372 for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
373 if (dst->len[i] == 0)
374 break;
375 /* get_user_pages() is a complicated function, especially since
376 * we only want a single page. But it works, and returns the
377 * number of pages. Note that we're holding the destination's
378 * mmap_sem, as get_user_pages() requires. */
379 if (get_user_pages(dstlg->tsk, dstlg->mm,
380 dst->addr[i], 1, 1, 1, pages+i, NULL)
381 != 1) {
382 /* This means the destination gave us a bogus buffer */
383 kill_guest(dstlg, "Error mapping DMA pages");
384 ret = 0;
385 goto drop_pages;
386 }
387 }
388
389 /* Now copy the data until we run out of src or dst. */
390 ret = copy_data(srclg, src, dst, pages);
391
392drop_pages:
393 while (--i >= 0)
394 put_page(pages[i]);
395 return ret;
396}
397
398/*L:380 Transferring data from one Guest to another is not as simple as I'd
399 * like. Once we've found the "struct lguest_dma_info" bound to the same
400 * address as the send, we need to copy into it.
401 *
402 * This function returns true if the destination array was empty. */
403static int dma_transfer(struct lguest *srclg,
404 unsigned long udma,
405 struct lguest_dma_info *dst)
406{
407 struct lguest_dma dst_dma, src_dma;
408 struct lguest *dstlg;
409 u32 i, dma = 0;
410
411 /* From the "struct lguest_dma_info" we found in the hash, grab the
412 * Guest. */
413 dstlg = &lguests[dst->guestid];
414 /* Read in the source "struct lguest_dma" handed to SEND_DMA. */
415 lgread(srclg, &src_dma, udma, sizeof(src_dma));
416
417 /* We need the destination's mmap_sem, and we already hold the source's
418 * mmap_sem for the futex key lookup. Normally this would suggest that
419 * we could deadlock if the destination Guest was trying to send to
420 * this source Guest at the same time, which is another reason that all
421 * I/O is done under the big lguest_lock. */
422 down_read(&dstlg->mm->mmap_sem);
423
424 /* Look through the destination DMA array for an available buffer. */
425 for (i = 0; i < dst->num_dmas; i++) {
426 /* We keep a "next_dma" pointer which often helps us avoid
427 * looking at lots of previously-filled entries. */
428 dma = (dst->next_dma + i) % dst->num_dmas;
429 if (!lgread_other(dstlg, &dst_dma,
430 dst->dmas + dma * sizeof(struct lguest_dma),
431 sizeof(dst_dma))) {
432 goto fail;
433 }
434 if (!dst_dma.used_len)
435 break;
436 }
437
438 /* If we found a buffer, we do the actual data copy. */
439 if (i != dst->num_dmas) {
440 unsigned long used_lenp;
441 unsigned int ret;
442
443 ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
444 /* Put used length in the source "struct lguest_dma"'s used_len
445 * field. It's a little tricky to figure out where that is,
446 * though. */
447 lgwrite_u32(srclg,
448 udma+offsetof(struct lguest_dma, used_len), ret);
449 /* Transferring 0 bytes is OK if the source buffer was empty. */
450 if (ret == 0 && src_dma.len[0] != 0)
451 goto fail;
452
453 /* The destination Guest might be running on a different CPU:
454 * we have to make sure that it will see the "used_len" field
455 * change to non-zero *after* it sees the data we copied into
456 * the buffer. Hence a write memory barrier. */
457 wmb();
458 /* Figuring out where the destination's used_len field for this
459 * "struct lguest_dma" in the array is also a little ugly. */
460 used_lenp = dst->dmas
461 + dma * sizeof(struct lguest_dma)
462 + offsetof(struct lguest_dma, used_len);
463 lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));
464 /* Move the cursor for next time. */
465 dst->next_dma++;
466 }
467 up_read(&dstlg->mm->mmap_sem);
468
469 /* We trigger the destination interrupt, even if the destination was
470 * empty and we didn't transfer anything: this gives them a chance to
471 * wake up and refill. */
472 set_bit(dst->interrupt, dstlg->irqs_pending);
473 /* Wake up the destination process. */
474 wake_up_process(dstlg->tsk);
475 /* If we passed the last "struct lguest_dma", the receive had no
476 * buffers left. */
477 return i == dst->num_dmas;
478
479fail:
480 up_read(&dstlg->mm->mmap_sem);
481 return 0;
482}
483
484/*L:370 This is the counter-side to the BIND_DMA hypercall: the SEND_DMA
485 * hypercall. We find out who's listening, and send to them. */
486void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
487{
488 union futex_key key;
489 int empty = 0;
490 struct rw_semaphore *fshared = &current->mm->mmap_sem;
491
492again:
493 mutex_lock(&lguest_lock);
494 down_read(fshared);
495 /* Get the futex key for the address the Guest gave us */
496 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
497 kill_guest(lg, "bad sending DMA key");
498 goto unlock;
499 }
500 /* Since the key must be a multiple of 4, the futex key uses the lower
501 * bit of the "offset" field (which would always be 0) to indicate a
502 * mapping which is shared with other processes (ie. Guests). */
503 if (key.shared.offset & 1) {
504 struct lguest_dma_info *i;
505 /* Look through the hash for other Guests. */
506 list_for_each_entry(i, &dma_hash[hash(&key)], list) {
507 /* Don't send to ourselves. */
508 if (i->guestid == lg->guestid)
509 continue;
510 if (!key_eq(&key, &i->key))
511 continue;
512
513 /* If dma_transfer() tells us the destination has no
514 * available buffers, we increment "empty". */
515 empty += dma_transfer(lg, udma, i);
516 break;
517 }
518 /* If the destination is empty, we release our locks and
519 * give the destination Guest a brief chance to restock. */
520 if (empty == 1) {
521 /* Give any recipients one chance to restock. */
522 up_read(&current->mm->mmap_sem);
523 mutex_unlock(&lguest_lock);
524 /* Bump "empty" past 1, so next time we won't try again. */
525 empty++;
526 goto again;
527 }
528 } else {
529 /* Private mapping: Guest is sending to its Launcher. We set
530 * the "dma_is_pending" flag so that the main loop will exit
531 * and the Launcher's read() from /dev/lguest will return. */
532 lg->dma_is_pending = 1;
533 lg->pending_dma = udma;
534 lg->pending_key = ukey;
535 }
536unlock:
537 up_read(fshared);
538 mutex_unlock(&lguest_lock);
539}
540/*:*/
541
542void release_all_dma(struct lguest *lg)
543{
544 unsigned int i;
545
546 BUG_ON(!mutex_is_locked(&lguest_lock));
547
548 down_read(&lg->mm->mmap_sem);
549 for (i = 0; i < LGUEST_MAX_DMA; i++) {
550 if (lg->dma[i].interrupt)
551 unlink_dma(&lg->dma[i]);
552 }
553 up_read(&lg->mm->mmap_sem);
554}
555
556/*M:007 We only return a single DMA buffer to the Launcher, but it would be
557 * more efficient to return a pointer to the entire array of DMA buffers, which
558 * it can cache and choose one whenever it wants.
559 *
560 * Currently the Launcher uses a write to /dev/lguest, and the return value is
561 * the address of the DMA structure with the interrupt number placed in
562 * dma->used_len. If we wanted to return the entire array, we would need to return
563 * the address, array size and interrupt number: this seems to require an
564 * ioctl(). :*/
565
566/*L:320 This routine looks for a DMA buffer registered by the Guest on the
567 * given key (using the BIND_DMA hypercall). */
568unsigned long get_dma_buffer(struct lguest *lg,
569 unsigned long ukey, unsigned long *interrupt)
570{
571 unsigned long ret = 0;
572 union futex_key key;
573 struct lguest_dma_info *i;
574 struct rw_semaphore *fshared = &current->mm->mmap_sem;
575
576 /* Take the Big Lguest Lock to stop other Guests sending this Guest DMA
577 * at the same time. */
578 mutex_lock(&lguest_lock);
579 /* To match between Guests sharing the same underlying memory we steal
580 * code from the futex infrastructure. This requires that we hold the
581 * "mmap_sem" for our process (the Launcher), and pass it to the futex
582 * code. */
583 down_read(fshared);
584
585 /* This can fail if it's not a valid address, or if the address is not
586 * divisible by 4 (the futex code needs that; we don't, really). */
587 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
588 kill_guest(lg, "bad registered DMA buffer");
589 goto unlock;
590 }
591 /* Search the hash table for matching entries (the Launcher can only
592 * send to its own Guest for the moment, so the entry must be for this
593 * Guest) */
594 list_for_each_entry(i, &dma_hash[hash(&key)], list) {
595 if (key_eq(&key, &i->key) && i->guestid == lg->guestid) {
596 unsigned int j;
597 /* Look through the registered DMA array for an
598 * available buffer. */
599 for (j = 0; j < i->num_dmas; j++) {
600 struct lguest_dma dma;
601
602 ret = i->dmas + j * sizeof(struct lguest_dma);
603 lgread(lg, &dma, ret, sizeof(dma));
604 if (dma.used_len == 0)
605 break;
606 }
607 /* Store the interrupt the Guest wants when the buffer
608 * is used. */
609 *interrupt = i->interrupt;
610 break;
611 }
612 }
613unlock:
614 up_read(fshared);
615 mutex_unlock(&lguest_lock);
616 return ret;
617}
618/*:*/
619
620/*L:410 This really does complete the Launcher. Not only have we now finished
621 * the longest chapter in our journey, but this also means we are over halfway
622 * through!
623 *
624 * Enough prevaricating around the bush: it is time for us to dive into the
625 * core of the Host, in "make Host".
626 */
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 64f0abed317c..d9144beca82c 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -1,119 +1,25 @@
1#ifndef _LGUEST_H 1#ifndef _LGUEST_H
2#define _LGUEST_H 2#define _LGUEST_H
3 3
4#include <asm/desc.h>
5
6#define GDT_ENTRY_LGUEST_CS 10
7#define GDT_ENTRY_LGUEST_DS 11
8#define LGUEST_CS (GDT_ENTRY_LGUEST_CS * 8)
9#define LGUEST_DS (GDT_ENTRY_LGUEST_DS * 8)
10
11#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
12#include <linux/types.h> 5#include <linux/types.h>
13#include <linux/init.h> 6#include <linux/init.h>
14#include <linux/stringify.h> 7#include <linux/stringify.h>
15#include <linux/binfmts.h>
16#include <linux/futex.h>
17#include <linux/lguest.h> 8#include <linux/lguest.h>
18#include <linux/lguest_launcher.h> 9#include <linux/lguest_launcher.h>
19#include <linux/wait.h> 10#include <linux/wait.h>
20#include <linux/err.h> 11#include <linux/err.h>
21#include <asm/semaphore.h> 12#include <asm/semaphore.h>
22#include "irq_vectors.h"
23
24#define GUEST_PL 1
25 13
26struct lguest_regs 14#include <asm/lguest.h>
27{
28 /* Manually saved part. */
29 unsigned long ebx, ecx, edx;
30 unsigned long esi, edi, ebp;
31 unsigned long gs;
32 unsigned long eax;
33 unsigned long fs, ds, es;
34 unsigned long trapnum, errcode;
35 /* Trap pushed part */
36 unsigned long eip;
37 unsigned long cs;
38 unsigned long eflags;
39 unsigned long esp;
40 unsigned long ss;
41};
42 15
43void free_pagetables(void); 16void free_pagetables(void);
44int init_pagetables(struct page **switcher_page, unsigned int pages); 17int init_pagetables(struct page **switcher_page, unsigned int pages);
45 18
46/* Full 4G segment descriptors, suitable for CS and DS. */
47#define FULL_EXEC_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9b00})
48#define FULL_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9300})
49
50struct lguest_dma_info
51{
52 struct list_head list;
53 union futex_key key;
54 unsigned long dmas;
55 u16 next_dma;
56 u16 num_dmas;
57 u16 guestid;
58 u8 interrupt; /* 0 when not registered */
59};
60
61/*H:310 The page-table code owes a great debt of gratitude to Andi Kleen. He
62 * reviewed the original code which used "u32" for all page table entries, and
63 * insisted that it would be far clearer with explicit typing. I thought it
64 * was overkill, but he was right: it is much clearer than it was before.
65 *
66 * We have separate types for the Guest's ptes & pgds and the shadow ptes &
67 * pgds. There's already a Linux type for these (pte_t and pgd_t) but they
68 * change depending on kernel config options (PAE). */
69
70/* Each entry is identical: lower 12 bits of flags and upper 20 bits for the
71 * "page frame number" (0 == first physical page, etc). They are different
72 * types so the compiler will warn us if we mix them improperly. */
73typedef union {
74 struct { unsigned flags:12, pfn:20; };
75 struct { unsigned long val; } raw;
76} spgd_t;
77typedef union {
78 struct { unsigned flags:12, pfn:20; };
79 struct { unsigned long val; } raw;
80} spte_t;
81typedef union {
82 struct { unsigned flags:12, pfn:20; };
83 struct { unsigned long val; } raw;
84} gpgd_t;
85typedef union {
86 struct { unsigned flags:12, pfn:20; };
87 struct { unsigned long val; } raw;
88} gpte_t;
89
90/* We have two convenient macros to convert a "raw" value as handed to us by
91 * the Guest into the correct Guest PGD or PTE type. */
92#define mkgpte(_val) ((gpte_t){.raw.val = _val})
93#define mkgpgd(_val) ((gpgd_t){.raw.val = _val})
94/*:*/
95
96struct pgdir 19struct pgdir
97{ 20{
98 unsigned long cr3; 21 unsigned long gpgdir;
99 spgd_t *pgdir; 22 pgd_t *pgdir;
100};
101
102/* This is a guest-specific page (mapped ro) into the guest. */
103struct lguest_ro_state
104{
105 /* Host information we need to restore when we switch back. */
106 u32 host_cr3;
107 struct Xgt_desc_struct host_idt_desc;
108 struct Xgt_desc_struct host_gdt_desc;
109 u32 host_sp;
110
111 /* Fields which are used when guest is running. */
112 struct Xgt_desc_struct guest_idt_desc;
113 struct Xgt_desc_struct guest_gdt_desc;
114 struct i386_hw_tss guest_tss;
115 struct desc_struct guest_idt[IDT_ENTRIES];
116 struct desc_struct guest_gdt[GDT_ENTRIES];
117}; 23};
118 24
119/* We have two pages shared with guests, per cpu. */ 25/* We have two pages shared with guests, per cpu. */
@@ -141,9 +47,11 @@ struct lguest
141 struct lguest_data __user *lguest_data; 47 struct lguest_data __user *lguest_data;
142 struct task_struct *tsk; 48 struct task_struct *tsk;
143 struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */ 49 struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
144 u16 guestid;
145 u32 pfn_limit; 50 u32 pfn_limit;
146 u32 page_offset; 51 /* This provides the offset to the base of guest-physical
52 * memory in the Launcher. */
53 void __user *mem_base;
54 unsigned long kernel_address;
147 u32 cr2; 55 u32 cr2;
148 int halted; 56 int halted;
149 int ts; 57 int ts;
@@ -151,6 +59,9 @@ struct lguest
151 u32 esp1; 59 u32 esp1;
152 u8 ss1; 60 u8 ss1;
153 61
62 /* If a hypercall was asked for, this points to the arguments. */
63 struct hcall_args *hcall;
64
154 /* Do we need to stop what we're doing and return to userspace? */ 65 /* Do we need to stop what we're doing and return to userspace? */
155 int break_out; 66 int break_out;
156 wait_queue_head_t break_wq; 67 wait_queue_head_t break_wq;
@@ -167,24 +78,15 @@ struct lguest
167 struct task_struct *wake; 78 struct task_struct *wake;
168 79
169 unsigned long noirq_start, noirq_end; 80 unsigned long noirq_start, noirq_end;
170 int dma_is_pending; 81 unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
171 unsigned long pending_dma; /* struct lguest_dma */
172 unsigned long pending_key; /* address they're sending to */
173 82
174 unsigned int stack_pages; 83 unsigned int stack_pages;
175 u32 tsc_khz; 84 u32 tsc_khz;
176 85
177 struct lguest_dma_info dma[LGUEST_MAX_DMA];
178
179 /* Dead? */ 86 /* Dead? */
180 const char *dead; 87 const char *dead;
181 88
182 /* The GDT entries copied into lguest_ro_state when running. */ 89 struct lguest_arch arch;
183 struct desc_struct gdt[GDT_ENTRIES];
184
185 /* The IDT entries: some copied into lguest_ro_state when running. */
186 struct desc_struct idt[FIRST_EXTERNAL_VECTOR+LGUEST_IRQS];
187 struct desc_struct syscall_idt;
188 90
189 /* Virtual clock device */ 91 /* Virtual clock device */
190 struct hrtimer hrt; 92 struct hrtimer hrt;
@@ -193,19 +95,38 @@ struct lguest
193 DECLARE_BITMAP(irqs_pending, LGUEST_IRQS); 95 DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
194}; 96};
195 97
196extern struct lguest lguests[];
197extern struct mutex lguest_lock; 98extern struct mutex lguest_lock;
198 99
199/* core.c: */ 100/* core.c: */
200u32 lgread_u32(struct lguest *lg, unsigned long addr);
201void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val);
202void lgread(struct lguest *lg, void *buf, unsigned long addr, unsigned len);
203void lgwrite(struct lguest *lg, unsigned long, const void *buf, unsigned len);
204int find_free_guest(void);
205int lguest_address_ok(const struct lguest *lg, 101int lguest_address_ok(const struct lguest *lg,
206 unsigned long addr, unsigned long len); 102 unsigned long addr, unsigned long len);
103void __lgread(struct lguest *, void *, unsigned long, unsigned);
104void __lgwrite(struct lguest *, unsigned long, const void *, unsigned);
105
106/*L:306 Using memory-copy operations like that is usually inconvenient, so we
107 * have the following helper macros which read and write a specific type (often
108 * an unsigned long).
109 *
110 * This reads into a variable of the given type then returns that. */
111#define lgread(lg, addr, type) \
112 ({ type _v; __lgread((lg), &_v, (addr), sizeof(_v)); _v; })
113
114/* This checks that the variable is of the given type, then writes it out. */
115#define lgwrite(lg, addr, type, val) \
116 do { \
117 typecheck(type, val); \
118 __lgwrite((lg), (addr), &(val), sizeof(val)); \
119 } while(0)
120/* (end of memory access helper routines) :*/
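/* For instance, a caller pulls a value out of Guest memory and writes one
 * back like so (a usage sketch only; "addr" stands for whatever Guest
 * address you already have):
 *
 *	u32 val = lgread(lg, addr, u32);
 *	lgwrite(lg, addr, u32, val);
 */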
121
207int run_guest(struct lguest *lg, unsigned long __user *user); 122int run_guest(struct lguest *lg, unsigned long __user *user);
208 123
124/* Helper macros to obtain the first 12 or the last 20 bits; this is only the
125 * first step in the migration to the kernel types. pte_pfn is already defined
126 * in the kernel. */
127#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
128#define pte_flags(x) (pte_val(x) & ~PAGE_MASK)
129#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
209 130
210/* interrupts_and_traps.c: */ 131/* interrupts_and_traps.c: */
211void maybe_do_interrupt(struct lguest *lg); 132void maybe_do_interrupt(struct lguest *lg);
@@ -219,6 +140,9 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
219 const unsigned long *def); 140 const unsigned long *def);
220void guest_set_clockevent(struct lguest *lg, unsigned long delta); 141void guest_set_clockevent(struct lguest *lg, unsigned long delta);
221void init_clockdev(struct lguest *lg); 142void init_clockdev(struct lguest *lg);
143bool check_syscall_vector(struct lguest *lg);
144int init_interrupts(void);
145void free_interrupts(void);
222 146
223/* segments.c: */ 147/* segments.c: */
224void setup_default_gdt_entries(struct lguest_ro_state *state); 148void setup_default_gdt_entries(struct lguest_ro_state *state);
@@ -232,28 +156,33 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt);
232int init_guest_pagetable(struct lguest *lg, unsigned long pgtable); 156int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
233void free_guest_pagetable(struct lguest *lg); 157void free_guest_pagetable(struct lguest *lg);
234void guest_new_pagetable(struct lguest *lg, unsigned long pgtable); 158void guest_new_pagetable(struct lguest *lg, unsigned long pgtable);
235void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 i); 159void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
236void guest_pagetable_clear_all(struct lguest *lg); 160void guest_pagetable_clear_all(struct lguest *lg);
237void guest_pagetable_flush_user(struct lguest *lg); 161void guest_pagetable_flush_user(struct lguest *lg);
238void guest_set_pte(struct lguest *lg, unsigned long cr3, 162void guest_set_pte(struct lguest *lg, unsigned long gpgdir,
239 unsigned long vaddr, gpte_t val); 163 unsigned long vaddr, pte_t val);
240void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages); 164void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages);
241int demand_page(struct lguest *info, unsigned long cr2, int errcode); 165int demand_page(struct lguest *info, unsigned long cr2, int errcode);
242void pin_page(struct lguest *lg, unsigned long vaddr); 166void pin_page(struct lguest *lg, unsigned long vaddr);
167unsigned long guest_pa(struct lguest *lg, unsigned long vaddr);
168void page_table_guest_data_init(struct lguest *lg);
169
170/* <arch>/core.c: */
171void lguest_arch_host_init(void);
172void lguest_arch_host_fini(void);
173void lguest_arch_run_guest(struct lguest *lg);
174void lguest_arch_handle_trap(struct lguest *lg);
175int lguest_arch_init_hypercalls(struct lguest *lg);
176int lguest_arch_do_hcall(struct lguest *lg, struct hcall_args *args);
177void lguest_arch_setup_regs(struct lguest *lg, unsigned long start);
178
179/* <arch>/switcher.S: */
180extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
243 181
244/* lguest_user.c: */ 182/* lguest_user.c: */
245int lguest_device_init(void); 183int lguest_device_init(void);
246void lguest_device_remove(void); 184void lguest_device_remove(void);
247 185
248/* io.c: */
249void lguest_io_init(void);
250int bind_dma(struct lguest *lg,
251 unsigned long key, unsigned long udma, u16 numdmas, u8 interrupt);
252void send_dma(struct lguest *info, unsigned long key, unsigned long udma);
253void release_all_dma(struct lguest *lg);
254unsigned long get_dma_buffer(struct lguest *lg, unsigned long key,
255 unsigned long *interrupt);
256
257/* hypercalls.c: */ 186/* hypercalls.c: */
258void do_hypercalls(struct lguest *lg); 187void do_hypercalls(struct lguest *lg);
259void write_timestamp(struct lguest *lg); 188void write_timestamp(struct lguest *lg);
@@ -292,9 +221,5 @@ do { \
292} while(0) 221} while(0)
293/* (End of aside) :*/ 222/* (End of aside) :*/
294 223
295static inline unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
296{
297 return vaddr - lg->page_offset;
298}
299#endif /* __ASSEMBLY__ */ 224#endif /* __ASSEMBLY__ */
300#endif /* _LGUEST_H */ 225#endif /* _LGUEST_H */
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
deleted file mode 100644
index 3ba337dde857..000000000000
--- a/drivers/lguest/lguest.c
+++ /dev/null
@@ -1,1108 +0,0 @@
1/*P:010
2 * A hypervisor allows multiple Operating Systems to run on a single machine.
3 * To quote David Wheeler: "Any problem in computer science can be solved with
4 * another layer of indirection."
5 *
6 * We keep things simple in two ways. First, we start with a normal Linux
7 * kernel and insert a module (lg.ko) which allows us to run other Linux
8 * kernels the same way we'd run processes. We call the first kernel the Host,
9 * and the others the Guests. The program which sets up and configures Guests
10 * (such as the example in Documentation/lguest/lguest.c) is called the
11 * Launcher.
12 *
13 * Secondly, we only run specially modified Guests, not normal kernels. When
14 * you set CONFIG_LGUEST to 'y' or 'm', this automatically sets
15 * CONFIG_LGUEST_GUEST=y, which compiles this file into the kernel so it knows
16 * how to be a Guest. This means that you can use the same kernel you boot
17 * normally (ie. as a Host) as a Guest.
18 *
19 * These Guests know that they cannot do privileged operations, such as disable
20 * interrupts, and that they have to ask the Host to do such things explicitly.
21 * This file consists of all the replacements for such low-level native
22 * hardware operations: these special Guest versions call the Host.
23 *
24 * So how does the kernel know it's a Guest? The Guest starts at a special
25 * entry point marked with a magic string, which sets up a few things then
26 * calls here. We replace the native functions in the various "paravirt"
27 * structures with our Guest versions, then boot like normal. :*/
28
29/*
30 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
31 *
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of the GNU General Public License as published by
34 * the Free Software Foundation; either version 2 of the License, or
35 * (at your option) any later version.
36 *
37 * This program is distributed in the hope that it will be useful, but
38 * WITHOUT ANY WARRANTY; without even the implied warranty of
39 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
40 * NON INFRINGEMENT. See the GNU General Public License for more
41 * details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
46 */
47#include <linux/kernel.h>
48#include <linux/start_kernel.h>
49#include <linux/string.h>
50#include <linux/console.h>
51#include <linux/screen_info.h>
52#include <linux/irq.h>
53#include <linux/interrupt.h>
54#include <linux/clocksource.h>
55#include <linux/clockchips.h>
56#include <linux/lguest.h>
57#include <linux/lguest_launcher.h>
58#include <linux/lguest_bus.h>
59#include <asm/paravirt.h>
60#include <asm/param.h>
61#include <asm/page.h>
62#include <asm/pgtable.h>
63#include <asm/desc.h>
64#include <asm/setup.h>
65#include <asm/e820.h>
66#include <asm/mce.h>
67#include <asm/io.h>
68
69/*G:010 Welcome to the Guest!
70 *
71 * The Guest in our tale is a simple creature: identical to the Host but
72 * behaving in simplified but equivalent ways. In particular, the Guest is the
73 * same kernel as the Host (or at least, built from the same source code). :*/
74
75/* Declarations for definitions in lguest_guest.S */
76extern char lguest_noirq_start[], lguest_noirq_end[];
77extern const char lgstart_cli[], lgend_cli[];
78extern const char lgstart_sti[], lgend_sti[];
79extern const char lgstart_popf[], lgend_popf[];
80extern const char lgstart_pushf[], lgend_pushf[];
81extern const char lgstart_iret[], lgend_iret[];
82extern void lguest_iret(void);
83
84struct lguest_data lguest_data = {
85 .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
86 .noirq_start = (u32)lguest_noirq_start,
87 .noirq_end = (u32)lguest_noirq_end,
88 .blocked_interrupts = { 1 }, /* Block timer interrupts */
89};
90struct lguest_device_desc *lguest_devices;
91static cycle_t clock_base;
92
93/*G:035 Notice the lazy_hcall() below, rather than hcall(). This is our first
94 * real optimization trick!
95 *
96 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
97 * them as a batch when lazy_mode is eventually turned off. Because hypercalls
98 * are reasonably expensive, batching them up makes sense. For example, a
99 * large mmap might update dozens of page table entries: that code calls
100 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
101 * lguest_leave_lazy_mode().
102 *
103 * So, when we're in lazy mode, we call async_hcall() to store the call for
104 * future processing. When lazy mode is turned off we issue a hypercall to
105 * flush the stored calls.
106 */
107static void lguest_leave_lazy_mode(void)
108{
109 paravirt_leave_lazy(paravirt_get_lazy_mode());
110 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
111}
112
113static void lazy_hcall(unsigned long call,
114 unsigned long arg1,
115 unsigned long arg2,
116 unsigned long arg3)
117{
118 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
119 hcall(call, arg1, arg2, arg3);
120 else
121 async_hcall(call, arg1, arg2, arg3);
122}
123
124/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
125 * ring buffer of stored hypercalls which the Host will run through next time we
126 * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
127 * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
128 * and 255 once the Host has finished with it.
129 *
130 * If we come around to a slot which hasn't been finished, then the table is
131 * full and we just make the hypercall directly. This has the nice side
132 * effect of causing the Host to run all the stored calls in the ring buffer
133 * which empties it for next time! */
134void async_hcall(unsigned long call,
135 unsigned long arg1, unsigned long arg2, unsigned long arg3)
136{
137 /* Note: This code assumes we're uniprocessor. */
138 static unsigned int next_call;
139 unsigned long flags;
140
141 /* Disable interrupts if not already disabled: we don't want an
142 * interrupt handler making a hypercall while we're already doing
143 * one! */
144 local_irq_save(flags);
145 if (lguest_data.hcall_status[next_call] != 0xFF) {
146 /* Table full, so do normal hcall which will flush table. */
147 hcall(call, arg1, arg2, arg3);
148 } else {
149 lguest_data.hcalls[next_call].eax = call;
150 lguest_data.hcalls[next_call].edx = arg1;
151 lguest_data.hcalls[next_call].ebx = arg2;
152 lguest_data.hcalls[next_call].ecx = arg3;
153 /* Arguments must all be written before we mark it to go */
154 wmb();
155 lguest_data.hcall_status[next_call] = 0;
156 if (++next_call == LHCALL_RING_SIZE)
157 next_call = 0;
158 }
159 local_irq_restore(flags);
160}
161/*:*/
162
163/* Wrappers for the SEND_DMA and BIND_DMA hypercalls. This is mainly because
164 * Jeff Garzik complained that __pa() should never appear in drivers, and this
165 * helps remove most of them. But also, it wraps some ugliness. */
166void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
167{
168 /* The hcall might not write this if something goes wrong */
169 dma->used_len = 0;
170 hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
171}
172
173int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
174 unsigned int num, u8 irq)
175{
176 /* This is the only hypercall which actually wants 5 arguments, and we
177 * only support 4. Fortunately the interrupt number is always less
178 * than 256, so we can pack it with the number of dmas in the final
179 * argument. */
180 if (!hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq))
181 return -ENOMEM;
182 return 0;
183}
184
185/* Unbinding is the same hypercall as binding, but with 0 num & irq. */
186void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas)
187{
188 hcall(LHCALL_BIND_DMA, key, __pa(dmas), 0);
189}
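/* (On the Host's side, the packing above is undone with shifts and masks,
 * something like:
 *
 *	u16 numdmas = args >> 8;
 *	u8 interrupt = args & 0xFF;
 *
 * which is why the interrupt number has to fit in 8 bits.) */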
190
191/* For guests, device memory can be used as normal memory, so we cast away the
192 * __iomem to quieten sparse. */
193void *lguest_map(unsigned long phys_addr, unsigned long pages)
194{
195 return (__force void *)ioremap(phys_addr, PAGE_SIZE*pages);
196}
197
198void lguest_unmap(void *addr)
199{
200 iounmap((__force void __iomem *)addr);
201}
202
203/*G:033
204 * Here are our first native-instruction replacements: four functions for
205 * interrupt control.
206 *
207 * The simplest way of implementing these would be to have "turn interrupts
208 * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
209 * these are by far the most commonly called functions of those we override.
210 *
211 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
212 * which the Guest can update with a single instruction. The Host knows to
213 * check there when it wants to deliver an interrupt.
214 */
215
216/* save_flags() is expected to return the processor state (ie. "eflags"). The
217 * eflags word contains all kind of stuff, but in practice Linux only cares
218 * about the interrupt flag. Our "save_flags()" just returns that. */
219static unsigned long save_fl(void)
220{
221 return lguest_data.irq_enabled;
222}
223
224/* "restore_flags" just sets the flags back to the value given. */
225static void restore_fl(unsigned long flags)
226{
227 lguest_data.irq_enabled = flags;
228}
229
230/* Interrupts go off... */
231static void irq_disable(void)
232{
233 lguest_data.irq_enabled = 0;
234}
235
236/* Interrupts go on... */
237static void irq_enable(void)
238{
239 lguest_data.irq_enabled = X86_EFLAGS_IF;
240}
241/*:*/
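/* (An aside: X86_EFLAGS_IF is bit 9 of eflags (0x200), the very bit the real
 * processor uses for the interrupt flag, so the value save_fl() returns
 * looks like a genuine eflags word to the rest of Linux.) */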
242/*M:003 Note that we don't check for outstanding interrupts when we re-enable
243 * them (or when we unmask an interrupt). This seems to work for the moment,
244 * since interrupts are rare and we'll just get the interrupt on the next timer
245 * tick, but when we turn on CONFIG_NO_HZ, we should revisit this. One way
246 * would be to put the "irq_enabled" field in a page by itself, and have the
247 * Host write-protect it when an interrupt comes in when irqs are disabled.
248 * There will then be a page fault as soon as interrupts are re-enabled. :*/
249
250/*G:034
251 * The Interrupt Descriptor Table (IDT).
252 *
253 * The IDT tells the processor what to do when an interrupt comes in. Each
254 * entry in the table is a 64-bit descriptor: this holds the privilege level,
255 * address of the handler, and... well, who cares? The Guest just asks the
256 * Host to make the change anyway, because the Host controls the real IDT.
257 */
258static void lguest_write_idt_entry(struct desc_struct *dt,
259 int entrynum, u32 low, u32 high)
260{
261 /* Keep the local copy up to date. */
262 write_dt_entry(dt, entrynum, low, high);
263 /* Tell Host about this new entry. */
264 hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
265}
266
267/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
268 * time it is written, so we can simply loop through all entries and tell the
269 * Host about them. */
270static void lguest_load_idt(const struct Xgt_desc_struct *desc)
271{
272 unsigned int i;
273 struct desc_struct *idt = (void *)desc->address;
274
275 for (i = 0; i < (desc->size+1)/8; i++)
276 hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
277}
278
279/*
280 * The Global Descriptor Table.
281 *
282 * The Intel architecture defines another table, called the Global Descriptor
283 * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
284 * instruction, and then several other instructions refer to entries in the
285 * table. There are three entries which the Switcher needs, so the Host simply
286 * controls the entire thing and the Guest asks it to make changes using the
287 * LOAD_GDT hypercall.
288 *
289 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
290 * hypercall and use that repeatedly to load a new IDT. I don't think it
291 * really matters, but wouldn't it be nice if they were the same?
292 */
293static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
294{
295 BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
296 hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
297}
298
299/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
300 * then tell the Host to reload the entire thing. This operation is so rare
301 * that this naive implementation is reasonable. */
302static void lguest_write_gdt_entry(struct desc_struct *dt,
303 int entrynum, u32 low, u32 high)
304{
305 write_dt_entry(dt, entrynum, low, high);
306 hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
307}
308
309/* OK, I lied. There are three "thread local storage" GDT entries which change
310 * on every context switch (these three entries are how glibc implements
311 * __thread variables). So we have a hypercall specifically for this case. */
312static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
313{
314 /* There's one problem which normal hardware doesn't have: the Host
315 * can't handle us removing entries we're currently using. So we clear
316 * the GS register here: if it's needed it'll be reloaded anyway. */
317 loadsegment(gs, 0);
318 lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
319}
320
321/*G:038 That's enough excitement for now, back to ploughing through each of
322 * the different pv_ops structures (we're about 1/3 of the way through).
323 *
324 * This is the Local Descriptor Table, another weird Intel thingy. Linux only
325 * uses this for some strange applications like Wine. We don't do anything
326 * here, so they'll get an informative and friendly Segmentation Fault. */
327static void lguest_set_ldt(const void *addr, unsigned entries)
328{
329}
330
331/* This loads a GDT entry into the "Task Register": that entry points to a
332 * structure called the Task State Segment. Some comments scattered through the
333 * kernel code indicate that this was used for task switching in ages past, along
334 * with blood sacrifice and astrology.
335 *
336 * Now there's nothing interesting in here that we don't get told elsewhere.
337 * But the native version uses the "ltr" instruction, which makes the Host
338 * complain to the Guest about a Segmentation Fault and it'll oops. So we
339 * override the native version with a do-nothing version. */
340static void lguest_load_tr_desc(void)
341{
342}
343
344/* The "cpuid" instruction is a way of querying both the CPU identity
345 * (manufacturer, model, etc) and its features. It was introduced before the
346 * Pentium in 1993 and keeps getting extended by both Intel and AMD. As you
347 * might imagine, after a decade and a half of this treatment, it is now a giant
348 * ball of hair. Its entry in the current Intel manual runs to 28 pages.
349 *
350 * This instruction even has its own Wikipedia entry. The Wikipedia entry
351 * has been translated into 4 languages. I am not making this up!
352 *
353 * We could get funky here and identify ourselves as "GenuineLguest", but
354 * instead we just use the real "cpuid" instruction. Then I pretty much turned
355 * off feature bits until the Guest booted. (Don't say that: you'll damage
356 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
357 * hardly future proof.) No one's listening! They don't like you anyway,
358 * parenthetic weirdo!
359 *
360 * Replacing the cpuid so we can turn features off is great for the kernel, but
361 * anyone (including userspace) can just use the raw "cpuid" instruction and
362 * the Host won't even notice since it isn't privileged. So we try not to get
363 * too worked up about it. */
364static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
365 unsigned int *ecx, unsigned int *edx)
366{
367 int function = *eax;
368
369 native_cpuid(eax, ebx, ecx, edx);
370 switch (function) {
371 case 1: /* Basic feature request. */
372 /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
373 *ecx &= 0x00002201;
374 /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
375 *edx &= 0x07808101;
376 /* The Host can do a nice optimization if it knows that the
377 * kernel mappings (addresses above 0xC0000000 or whatever
378 * PAGE_OFFSET is set to) haven't changed. But Linux calls
379 * flush_tlb_user() for both user and kernel mappings unless
380 * the Page Global Enable (PGE) feature bit is set. */
381 *edx |= 0x00002000;
382 break;
383 case 0x80000000:
384 /* Futureproof this a little: if they ask how much extended
385 * processor information there is, limit it to known fields. */
386 if (*eax > 0x80000008)
387 *eax = 0x80000008;
388 break;
389 }
390}
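/* (Decoding those magic masks, as a worked aside: 0x00002201 keeps ecx bits
 * 0, 9 and 13 (SSE3, SSSE3, CMPXCHG16B); 0x07808101 keeps edx bits 0, 8, 15,
 * 23, 24, 25 and 26 (FPU, CMPXCHG8B, CMOV, MMX, FXSR, SSE, SSE2); and the
 * PGE bit we force on is edx bit 13, ie. 0x00002000.) */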
391
392/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
393 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
394 * it. The Host needs to know when the Guest wants to change them, so we have
395 * a whole series of functions like read_cr0() and write_cr0().
396 *
397 * We start with CR0. CR0 allows you to turn on and off all kinds of basic
398 * features, but Linux only really cares about one: the horrifically-named Task
399 * Switched (TS) bit at bit 3 (ie. 8).
400 *
401 * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
402 * the floating point unit is used. Which allows us to restore FPU state
403 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
404 * name like "FPUTRAP bit" be a little less cryptic?
405 *
406 * We store cr0 (and cr3) locally, because the Host never changes it. The
407 * Guest sometimes wants to read it and we'd prefer not to bother the Host
408 * unnecessarily. */
409static unsigned long current_cr0, current_cr3;
410static void lguest_write_cr0(unsigned long val)
411{
412 /* 8 == TS bit. */
413 lazy_hcall(LHCALL_TS, val & 8, 0, 0);
414 current_cr0 = val;
415}
416
417static unsigned long lguest_read_cr0(void)
418{
419 return current_cr0;
420}
421
422/* Intel provided a special instruction to clear the TS bit for people too cool
423 * to use write_cr0() to do it. This "clts" instruction is faster, because all
424 * the vowels have been optimized out. */
425static void lguest_clts(void)
426{
427 lazy_hcall(LHCALL_TS, 0, 0, 0);
428 current_cr0 &= ~8U;
429}
430
431/* CR2 is the virtual address of the last page fault, which the Guest only ever
432 * reads. The Host kindly writes this into our "struct lguest_data", so we
433 * just read it out of there. */
434static unsigned long lguest_read_cr2(void)
435{
436 return lguest_data.cr2;
437}
438
439/* CR3 is the current toplevel pagetable page: the principle is the same as
440 * cr0. Keep a local copy, and tell the Host when it changes. */
441static void lguest_write_cr3(unsigned long cr3)
442{
443 lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
444 current_cr3 = cr3;
445}
446
447static unsigned long lguest_read_cr3(void)
448{
449 return current_cr3;
450}
451
452/* CR4 is used to enable and disable PGE, but we don't care. */
453static unsigned long lguest_read_cr4(void)
454{
455 return 0;
456}
457
458static void lguest_write_cr4(unsigned long val)
459{
460}
461
462/*
463 * Page Table Handling.
464 *
465 * Now would be a good time to take a rest and grab a coffee or similarly
466 * relaxing stimulant. The easy parts are behind us, and the trek gradually
467 * winds uphill from here.
468 *
469 * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
470 * maps virtual addresses to physical addresses using "page tables". We could
471 * use one huge index of 1 million entries: each address is 4 bytes, so that's
472 * 1024 pages just to hold the page tables. But since most virtual addresses
473 * are unused, we use a two level index which saves space. The CR3 register
474 * contains the physical address of the top level "page directory" page, which
475 * contains physical addresses of up to 1024 second-level pages. Each of these
476 * second level pages contains up to 1024 physical addresses of actual pages,
477 * or Page Table Entries (PTEs).
478 *
479 * Here's a diagram, where arrows indicate physical addresses:
480 *
481 * CR3 ---> +---------+
482 * | --------->+---------+
483 * | | | PADDR1 |
484 * Top-level | | PADDR2 |
485 * (PMD) page | | |
486 * | | Lower-level |
487 * | | (PTE) page |
488 * | | | |
489 * .... ....
490 *
491 * So to convert a virtual address to a physical address, we look up the top
492 * level, which points us to the second level, which gives us the physical
493 * address of that page. If the top level entry was not present, or the second
494 * level entry was not present, then the virtual address is invalid (we
495 * say "the page was not mapped").
496 *
497 * Put another way, a 32-bit virtual address is divided up like so:
498 *
499 * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
500 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
501 * Index into top Index into second Offset within page
502 * page directory page pagetable page
503 *
504 * The kernel spends a lot of time changing both the top-level page directory
505 * and lower-level pagetable pages. The Guest doesn't know physical addresses,
506 * so while it maintains these page tables exactly like normal, it also needs
507 * to keep the Host informed whenever it makes a change: the Host will create
508 * the real page tables based on the Guests'.
509 */
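/* (A worked example of that split: take the virtual address 0xC0101234.
 *
 *	top-level index = 0xC0101234 >> 22          = 768
 *	second index    = (0xC0101234 >> 12) & 1023 = 257
 *	offset in page  = 0xC0101234 & 4095         = 0x234
 *
 * So entry 768 of the page directory points to the pagetable page whose
 * entry 257 holds the physical page we want.) */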
510
511/* The Guest calls this to set a second-level entry (pte), ie. to map a page
512 * into a process' address space. We set the entry then tell the Host the
513 * toplevel and address this corresponds to. The Guest uses one pagetable per
514 * process, so we need to tell the Host which one we're changing (mm->pgd). */
515static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
516 pte_t *ptep, pte_t pteval)
517{
518 *ptep = pteval;
519 lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
520}
521
522/* The Guest calls this to set a top-level entry. Again, we set the entry then
523 * tell the Host which top-level page we changed, and the index of the entry we
524 * changed. */
525static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
526{
527 *pmdp = pmdval;
528 lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
529 (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
530}
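/* (An aside on that second argument: __pa(pmdp) & (PAGE_SIZE-1) is the byte
 * offset of the changed entry within its page, and each entry is 4 bytes,
 * so dividing by 4 turns the offset into the entry's index.) */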
531
532/* There are a couple of legacy places where the kernel sets a PTE, but we
533 * don't know the top level any more. This is useless for us, since we don't
534 * know which pagetable is changing or what address, so we just tell the Host
535 * to forget all of them. Fortunately, this is very rare.
536 *
537 * ... except in early boot when the kernel sets up the initial pagetables,
538 * which makes booting astonishingly slow. So we don't even tell the Host
539 * anything changed until we've done the first page table switch.
540 */
541static void lguest_set_pte(pte_t *ptep, pte_t pteval)
542{
543 *ptep = pteval;
544 /* Don't bother with hypercall before initial setup. */
545 if (current_cr3)
546 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
547}
548
549/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
550 * native page table operations. On native hardware you can set a new page
551 * table entry whenever you want, but if you want to remove one you have to do
552 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
553 *
554 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
555 * called when a valid entry is written, not when it's removed (ie. marked not
556 * present). Instead, this is where we come when the Guest wants to remove a
557 * page table entry: we tell the Host to set that entry to 0 (ie. the present
558 * bit is zero). */
559static void lguest_flush_tlb_single(unsigned long addr)
560{
561 /* Simply set it to zero: if it was not, it will fault back in. */
562 lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
563}
564
565/* This is what happens after the Guest has removed a large number of entries.
566 * This tells the Host that any of the page table entries for userspace might
567 * have changed, ie. virtual addresses below PAGE_OFFSET. */
568static void lguest_flush_tlb_user(void)
569{
570 lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
571}
572
573/* This is called when the kernel page tables have changed. That's not very
574 * common (unless the Guest is using highmem, which makes the Guest extremely
575 * slow), so it's worth separating this from the user flushing above. */
576static void lguest_flush_tlb_kernel(void)
577{
578 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
579}
580
581/*
582 * The Unadvanced Programmable Interrupt Controller.
583 *
584 * This is an attempt to implement the simplest possible interrupt controller.
585 * I spent some time looking through routines like set_irq_chip_and_handler,
586 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
587 * I *think* this is as simple as it gets.
588 *
589 * We can tell the Host what interrupts we want blocked ready for using the
590 * lguest_data.interrupts bitmap, so disabling (aka "masking") them is as
591 * simple as setting a bit. We don't actually "ack" interrupts as such, we
592 * just mask and unmask them. I wonder if we should be cleverer?
593 */
594static void disable_lguest_irq(unsigned int irq)
595{
596 set_bit(irq, lguest_data.blocked_interrupts);
597}
598
599static void enable_lguest_irq(unsigned int irq)
600{
601 clear_bit(irq, lguest_data.blocked_interrupts);
602}
603
604/* This structure describes the lguest IRQ controller. */
605static struct irq_chip lguest_irq_controller = {
606 .name = "lguest",
607 .mask = disable_lguest_irq,
608 .mask_ack = disable_lguest_irq,
609 .unmask = enable_lguest_irq,
610};
611
612/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
613 * interrupt (except 128, which is used for system calls), and then tells the
614 * Linux infrastructure that each interrupt is controlled by our level-based
615 * lguest interrupt controller. */
616static void __init lguest_init_IRQ(void)
617{
618 unsigned int i;
619
620 for (i = 0; i < LGUEST_IRQS; i++) {
621 int vector = FIRST_EXTERNAL_VECTOR + i;
622 if (vector != SYSCALL_VECTOR) {
623 set_intr_gate(vector, interrupt[i]);
624 set_irq_chip_and_handler(i, &lguest_irq_controller,
625 handle_level_irq);
626 }
627 }
628 /* This call is required to set up for 4k stacks, where we have
629 * separate stacks for hard and soft interrupts. */
630 irq_ctx_init(smp_processor_id());
631}
632
633/*
634 * Time.
635 *
636 * It would be far better for everyone if the Guest had its own clock, but
637 * until then the Host gives us the time on every interrupt.
638 */
639static unsigned long lguest_get_wallclock(void)
640{
641 return lguest_data.time.tv_sec;
642}
643
644static cycle_t lguest_clock_read(void)
645{
646 unsigned long sec, nsec;
647
648 /* If the Host tells us the TSC speed, we can trust that. */
649 if (lguest_data.tsc_khz)
650 return native_read_tsc();
651
652 /* If we can't use the TSC, we read the time value written by the Host.
653 * Since it's in two parts (seconds and nanoseconds), we risk reading
654 * it just as it's changing from 99 & 0.999999999 to 100 and 0, and
655 * getting 99 and 0. As Linux tends to come apart under the stress of
656 * time travel, we must be careful: */
657 do {
658 /* First we read the seconds part. */
659 sec = lguest_data.time.tv_sec;
660 /* This read memory barrier tells the compiler and the CPU that
661 * this can't be reordered: we have to complete the above
662 * before going on. */
663 rmb();
664 /* Now we read the nanoseconds part. */
665 nsec = lguest_data.time.tv_nsec;
666 /* Make sure we've done that. */
667 rmb();
668 /* Now if the seconds part has changed, try again. */
669 } while (unlikely(lguest_data.time.tv_sec != sec));
670
671 /* Our non-TSC clock is in real nanoseconds. */
672 return sec*1000000000ULL + nsec;
673}
674
675/* This is what we tell the kernel is our clocksource. */
676static struct clocksource lguest_clock = {
677 .name = "lguest",
678 .rating = 400,
679 .read = lguest_clock_read,
680 .mask = CLOCKSOURCE_MASK(64),
681 .mult = 1 << 22,
682 .shift = 22,
683};
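/* (An aside: the clocksource core converts cycles to nanoseconds as
 * (cycles * mult) >> shift, so with mult = 1 << 22 and shift = 22 the
 * conversion collapses to "cycles": our non-TSC clock above already counts
 * in nanoseconds. lguest_time_init() below swaps in a real mult when the
 * Host reports a TSC frequency.) */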
684
685/* The "scheduler clock" is just our real clock, adjusted to start at zero */
686static unsigned long long lguest_sched_clock(void)
687{
688 return cyc2ns(&lguest_clock, lguest_clock_read() - clock_base);
689}
690
691/* We also need a "struct clock_event_device": Linux asks us to set it to go
692 * off some time in the future. Actually, James Morris figured all this out, I
693 * just applied the patch. */
694static int lguest_clockevent_set_next_event(unsigned long delta,
695 struct clock_event_device *evt)
696{
697 if (delta < LG_CLOCK_MIN_DELTA) {
698 if (printk_ratelimit())
699 printk(KERN_DEBUG "%s: small delta %lu ns\n",
700 __FUNCTION__, delta);
701 return -ETIME;
702 }
703 hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
704 return 0;
705}
706
707static void lguest_clockevent_set_mode(enum clock_event_mode mode,
708 struct clock_event_device *evt)
709{
710 switch (mode) {
711 case CLOCK_EVT_MODE_UNUSED:
712 case CLOCK_EVT_MODE_SHUTDOWN:
713 /* A 0 argument shuts the clock down. */
714 hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
715 break;
716 case CLOCK_EVT_MODE_ONESHOT:
717 /* This is what we expect. */
718 break;
719 case CLOCK_EVT_MODE_PERIODIC:
720 BUG();
721 case CLOCK_EVT_MODE_RESUME:
722 break;
723 }
724}
725
726/* This describes our primitive timer chip. */
727static struct clock_event_device lguest_clockevent = {
728 .name = "lguest",
729 .features = CLOCK_EVT_FEAT_ONESHOT,
730 .set_next_event = lguest_clockevent_set_next_event,
731 .set_mode = lguest_clockevent_set_mode,
732 .rating = INT_MAX,
733 .mult = 1,
734 .shift = 0,
735 .min_delta_ns = LG_CLOCK_MIN_DELTA,
736 .max_delta_ns = LG_CLOCK_MAX_DELTA,
737};
738
739/* This is the Guest timer interrupt handler (hardware interrupt 0). We just
740 * call the clockevent infrastructure and it does whatever needs doing. */
741static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
742{
743 unsigned long flags;
744
745 /* Don't interrupt us while this is running. */
746 local_irq_save(flags);
747 lguest_clockevent.event_handler(&lguest_clockevent);
748 local_irq_restore(flags);
749}
750
751/* At some point in the boot process, we get asked to set up our timing
752 * infrastructure. The kernel doesn't expect timer interrupts before this, but
753 * we cleverly initialized the "blocked_interrupts" field of "struct
754 * lguest_data" so that timer interrupts were blocked until now. */
755static void lguest_time_init(void)
756{
757 /* Set up the timer interrupt (0) to go to our simple timer routine */
758 set_irq_handler(0, lguest_time_irq);
759
760	/* Our clock structure looks like arch/i386/kernel/tsc.c if we can use
761 * the TSC, otherwise it's a dumb nanosecond-resolution clock. Either
762 * way, the "rating" is initialized so high that it's always chosen
763 * over any other clocksource. */
764 if (lguest_data.tsc_khz) {
765 lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
766 lguest_clock.shift);
767 lguest_clock.flags = CLOCK_SOURCE_IS_CONTINUOUS;
768 }
769 clock_base = lguest_clock_read();
770 clocksource_register(&lguest_clock);
771
772 /* Now we've set up our clock, we can use it as the scheduler clock */
773 pv_time_ops.sched_clock = lguest_sched_clock;
774
775 /* We can't set cpumask in the initializer: damn C limitations! Set it
776 * here and register our timer device. */
777 lguest_clockevent.cpumask = cpumask_of_cpu(0);
778 clockevents_register_device(&lguest_clockevent);
779
780 /* Finally, we unblock the timer interrupt. */
781 enable_lguest_irq(0);
782}
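
/* For the TSC case above, clocksource_khz2mult() picks mult so that
 * ns = (cycles * mult) >> shift still holds. One cycle of a khz-rated
 * counter lasts 1,000,000/khz nanoseconds, so roughly (a sketch which
 * ignores the real function's rounding): */
#include <stdint.h>

static uint32_t khz2mult(uint32_t khz, uint32_t shift)
{
	return (uint32_t)((1000000ULL << shift) / khz);
}
/* e.g. khz2mult(2000000, 22) == 1 << 21: a 2GHz TSC ticks every half
 * nanosecond, and (cycles << 21) >> 22 == cycles / 2. */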
783
784/*
785 * Miscellaneous bits and pieces.
786 *
787 * Here is an oddball collection of functions which the Guest needs for things
788 * to work. They're pretty simple.
789 */
790
791/* The Guest needs to tell the host what stack it expects traps to use. For
792 * native hardware, this is part of the Task State Segment mentioned above in
793 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
794 *
795 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
796 * segment), the privilege level (we're privilege level 1, the Host is 0 and
797 * will not tolerate us trying to use that), the stack pointer, and the number
798 * of pages in the stack. */
799static void lguest_load_esp0(struct tss_struct *tss,
800 struct thread_struct *thread)
801{
802 lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
803 THREAD_SIZE/PAGE_SIZE);
804}
805
806/* Let's just say, I wouldn't do debugging under a Guest. */
807static void lguest_set_debugreg(int regno, unsigned long value)
808{
809 /* FIXME: Implement */
810}
811
812/* There are times when the kernel wants to make sure that no memory writes are
813 * caught in the cache (that they've all reached real hardware devices). This
814 * doesn't matter for the Guest which has virtual hardware.
815 *
816 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
817 * (clflush) instruction is available and the kernel uses that. Otherwise, it
818 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
819 * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
820 * ignore clflush, but replace wbinvd.
821 */
822static void lguest_wbinvd(void)
823{
824}
825
826/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
827 * we play dumb by ignoring writes and returning 0 for reads. So it's no
828 * longer Programmable nor Controlling anything, and I don't think 8 lines of
829 * code qualifies for Advanced. It will also never interrupt anything. It
830 * does, however, allow us to get through the Linux boot code. */
831#ifdef CONFIG_X86_LOCAL_APIC
832static void lguest_apic_write(unsigned long reg, unsigned long v)
833{
834}
835
836static unsigned long lguest_apic_read(unsigned long reg)
837{
838 return 0;
839}
840#endif
841
842/* STOP! Until an interrupt comes in. */
843static void lguest_safe_halt(void)
844{
845 hcall(LHCALL_HALT, 0, 0, 0);
846}
847
848/* Perhaps CRASH isn't the best name for this hypercall, but we use it to get a
849 * message out when we're crashing as well as elegant termination like powering
850 * off.
851 *
852 * Note that the Host always prefers that the Guest speak in physical addresses
853 * rather than virtual addresses, so we use __pa() here. */
854static void lguest_power_off(void)
855{
856 hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
857}
858
859/*
860 * Panicing.
861 *
862 * Don't. But if you did, this is what happens.
863 */
864static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
865{
866 hcall(LHCALL_CRASH, __pa(p), 0, 0);
867 /* The hcall won't return, but to keep gcc happy, we're "done". */
868 return NOTIFY_DONE;
869}
870
871static struct notifier_block paniced = {
872 .notifier_call = lguest_panic
873};
874
875/* Setting up memory is fairly easy. */
876static __init char *lguest_memory_setup(void)
877{
878 /* We do this here and not earlier because lockcheck barfs if we do it
879 * before start_kernel() */
880 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
881
882 /* The Linux bootloader header contains an "e820" memory map: the
883 * Launcher populated the first entry with our memory limit. */
884 add_memory_region(boot_params.e820_map[0].addr,
885 boot_params.e820_map[0].size,
886 boot_params.e820_map[0].type);
887
888 /* This string is for the boot messages. */
889 return "LGUEST";
890}
891
892/*G:050
893 * Patching (Powerfully Placating Performance Pedants)
894 *
895 * We have already seen that pv_ops structures let us replace simple
896 * native instructions with calls to the appropriate back end all throughout
897 * the kernel. This allows the same kernel to run as a Guest and as a native
898 * kernel, but it's slow because of all the indirect branches.
899 *
900 * Remember that David Wheeler quote about "Any problem in computer science can
901 * be solved with another layer of indirection"? The rest of that quote is
902 * "... But that usually will create another problem." This is the first of
903 * those problems.
904 *
905 * Our current solution is to allow the paravirt back end to optionally patch
906 * over the indirect calls to replace them with something more efficient. We
907 * patch the four most commonly called functions: disable interrupts, enable
908 * interrupts, restore interrupts and save interrupts. We usually have 10
909 * bytes to patch into: the Guest versions of these operations are small enough
910 * that we can fit comfortably.
911 *
912 * First we need assembly templates of each of the patchable Guest operations,
913 * and these are in lguest_asm.S. */
914
915/*G:060 We construct a table from the assembler templates: */
916static const struct lguest_insns
917{
918 const char *start, *end;
919} lguest_insns[] = {
920 [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
921 [PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
922 [PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
923 [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
924};
925
926/* Now our patch routine is fairly simple (based on the native one in
927 * paravirt.c). If we have a replacement, we copy it in and return how much of
928 * the available space we used. */
929static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
930 unsigned long addr, unsigned len)
931{
932 unsigned int insn_len;
933
934 /* Don't do anything special if we don't have a replacement */
935 if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
936 return paravirt_patch_default(type, clobber, ibuf, addr, len);
937
938 insn_len = lguest_insns[type].end - lguest_insns[type].start;
939
940 /* Similarly if we can't fit replacement (shouldn't happen, but let's
941 * be thorough). */
942 if (len < insn_len)
943 return paravirt_patch_default(type, clobber, ibuf, addr, len);
944
945 /* Copy in our instructions. */
946 memcpy(ibuf, lguest_insns[type].start, insn_len);
947 return insn_len;
948}
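
/* What the caller does with that return value, in sketch form: copy the
 * template over the call site and pad the remainder with one-byte nops
 * (0x90). This shows the idea only; it is not the kernel's actual
 * apply_paravirt(). */
#include <string.h>

static void patch_site(unsigned char *site, unsigned int sitelen,
		       const unsigned char *tmpl, unsigned int tmpllen)
{
	if (tmpllen > sitelen)
		return;		/* doesn't fit: keep the indirect call */
	memcpy(site, tmpl, tmpllen);
	memset(site + tmpllen, 0x90, sitelen - tmpllen);	/* nop pad */
}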
949
950/*G:030 Once we get to lguest_init(), we know we're a Guest. The pv_ops
951 * structures in the kernel provide points for (almost) every routine we have
952 * to override to avoid privileged instructions. */
953__init void lguest_init(void *boot)
954{
955 /* Copy boot parameters first: the Launcher put the physical location
956 * in %esi, and head.S converted that to a virtual address and handed
957 * it to us. We use "__memcpy" because "memcpy" sometimes tries to do
958 * tricky things to go faster, and we're not ready for that. */
959 __memcpy(&boot_params, boot, PARAM_SIZE);
960 /* The boot parameters also tell us where the command-line is: save
961 * that, too. */
962 __memcpy(boot_command_line, __va(boot_params.hdr.cmd_line_ptr),
963 COMMAND_LINE_SIZE);
964
965 /* We're under lguest, paravirt is enabled, and we're running at
966 * privilege level 1, not 0 as normal. */
967 pv_info.name = "lguest";
968 pv_info.paravirt_enabled = 1;
969 pv_info.kernel_rpl = 1;
970
971 /* We set up all the lguest overrides for sensitive operations. These
972 * are detailed with the operations themselves. */
973
974 /* interrupt-related operations */
975 pv_irq_ops.init_IRQ = lguest_init_IRQ;
976 pv_irq_ops.save_fl = save_fl;
977 pv_irq_ops.restore_fl = restore_fl;
978 pv_irq_ops.irq_disable = irq_disable;
979 pv_irq_ops.irq_enable = irq_enable;
980 pv_irq_ops.safe_halt = lguest_safe_halt;
981
982 /* init-time operations */
983 pv_init_ops.memory_setup = lguest_memory_setup;
984 pv_init_ops.patch = lguest_patch;
985
986 /* Intercepts of various cpu instructions */
987 pv_cpu_ops.load_gdt = lguest_load_gdt;
988 pv_cpu_ops.cpuid = lguest_cpuid;
989 pv_cpu_ops.load_idt = lguest_load_idt;
990 pv_cpu_ops.iret = lguest_iret;
991 pv_cpu_ops.load_esp0 = lguest_load_esp0;
992 pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
993 pv_cpu_ops.set_ldt = lguest_set_ldt;
994 pv_cpu_ops.load_tls = lguest_load_tls;
995 pv_cpu_ops.set_debugreg = lguest_set_debugreg;
996 pv_cpu_ops.clts = lguest_clts;
997 pv_cpu_ops.read_cr0 = lguest_read_cr0;
998 pv_cpu_ops.write_cr0 = lguest_write_cr0;
999 pv_cpu_ops.read_cr4 = lguest_read_cr4;
1000 pv_cpu_ops.write_cr4 = lguest_write_cr4;
1001 pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
1002 pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
1003 pv_cpu_ops.wbinvd = lguest_wbinvd;
1004 pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
1005 pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
1006
1007 /* pagetable management */
1008 pv_mmu_ops.write_cr3 = lguest_write_cr3;
1009 pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
1010 pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
1011 pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
1012 pv_mmu_ops.set_pte = lguest_set_pte;
1013 pv_mmu_ops.set_pte_at = lguest_set_pte_at;
1014 pv_mmu_ops.set_pmd = lguest_set_pmd;
1015 pv_mmu_ops.read_cr2 = lguest_read_cr2;
1016 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1017 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
1018 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
1019
1020#ifdef CONFIG_X86_LOCAL_APIC
1021 /* apic read/write intercepts */
1022 pv_apic_ops.apic_write = lguest_apic_write;
1023 pv_apic_ops.apic_write_atomic = lguest_apic_write;
1024 pv_apic_ops.apic_read = lguest_apic_read;
1025#endif
1026
1027 /* time operations */
1028 pv_time_ops.get_wallclock = lguest_get_wallclock;
1029 pv_time_ops.time_init = lguest_time_init;
1030
1031 /* Now is a good time to look at the implementations of these functions
1032 * before returning to the rest of lguest_init(). */
1033
1034 /*G:070 Now we've seen all the paravirt_ops, we return to
1035 * lguest_init() where the rest of the fairly chaotic boot setup
1036 * occurs.
1037 *
1038 * The Host expects our first hypercall to tell it where our "struct
1039 * lguest_data" is, so we do that first. */
1040 hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
1041
1042 /* The native boot code sets up initial page tables immediately after
1043 * the kernel itself, and sets init_pg_tables_end so they're not
1044 * clobbered. The Launcher places our initial pagetables somewhere at
1045 * the top of our physical memory, so we don't need extra space: set
1046 * init_pg_tables_end to the end of the kernel. */
1047 init_pg_tables_end = __pa(pg0);
1048
1049 /* Load the %fs segment register (the per-cpu segment register) with
1050 * the normal data segment to get through booting. */
1051 asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
1052
1053 /* Clear the part of the kernel data which is expected to be zero.
1054 * Normally it will be anyway, but if we're loading from a bzImage with
1055	 * CONFIG_RELOCATABLE=y, the relocations will be sitting here. */
1056 memset(__bss_start, 0, __bss_stop - __bss_start);
1057
1058 /* The Host uses the top of the Guest's virtual address space for the
1059 * Host<->Guest Switcher, and it tells us how much it needs in
1060 * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
1061 reserve_top_address(lguest_data.reserve_mem);
1062
1063 /* If we don't initialize the lock dependency checker now, it crashes
1064 * paravirt_disable_iospace. */
1065 lockdep_init();
1066
1067 /* The IDE code spends about 3 seconds probing for disks: if we reserve
1068 * all the I/O ports up front it can't get them and so doesn't probe.
1069 * Other device drivers are similar (but less severe). This cuts the
1070 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
1071 paravirt_disable_iospace();
1072
1073 /* This is messy CPU setup stuff which the native boot code does before
1074 * start_kernel, so we have to do, too: */
1075 cpu_detect(&new_cpu_data);
1076 /* head.S usually sets up the first capability word, so do it here. */
1077 new_cpu_data.x86_capability[0] = cpuid_edx(1);
1078
1079 /* Math is always hard! */
1080 new_cpu_data.hard_math = 1;
1081
1082#ifdef CONFIG_X86_MCE
1083 mce_disabled = 1;
1084#endif
1085#ifdef CONFIG_ACPI
1086 acpi_disabled = 1;
1087 acpi_ht = 0;
1088#endif
1089
1090	/* We set the preferred console to "hvc". This is the "hypervisor
1091 * virtual console" driver written by the PowerPC people, which we also
1092 * adapted for lguest's use. */
1093 add_preferred_console("hvc", 0, NULL);
1094
1095 /* Last of all, we set the power management poweroff hook to point to
1096 * the Guest routine to power off. */
1097 pm_power_off = lguest_power_off;
1098
1099 /* Now we're set up, call start_kernel() in init/main.c and we proceed
1100 * to boot as normal. It never returns. */
1101 start_kernel();
1102}
1103/*
1104 * This marks the end of stage II of our journey, The Guest.
1105 *
1106 * It is now time for us to explore the nooks and crannies of the three Guest
1107 * devices and complete our understanding of the Guest in "make Drivers".
1108 */
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
deleted file mode 100644
index 1ddcd5cd20f6..000000000000
--- a/drivers/lguest/lguest_asm.S
+++ /dev/null
@@ -1,93 +0,0 @@
1#include <linux/linkage.h>
2#include <linux/lguest.h>
3#include <asm/asm-offsets.h>
4#include <asm/thread_info.h>
5#include <asm/processor-flags.h>
6
7/*G:020 This is where we begin: we have a magic signature which the launcher
8 * looks for. The plan is that the Linux boot protocol will be extended with a
9 * "platform type" field which will guide us here from the normal entry point,
10 * but for the moment this suffices. The normal boot code uses %esi for the
11 * boot header, so we do too. We convert it to a virtual address by adding
12 * PAGE_OFFSET, and hand it to lguest_init() as its argument (ie. %eax).
13 *
14 * The .section line puts this code in .init.text so it will be discarded after
15 * boot. */
16.section .init.text, "ax", @progbits
17.ascii "GenuineLguest"
18 /* Set up initial stack. */
19 movl $(init_thread_union+THREAD_SIZE),%esp
20 movl %esi, %eax
21 addl $__PAGE_OFFSET, %eax
22 jmp lguest_init
23
24/*G:055 We create a macro which puts the assembler code between lgstart_ and
25 * lgend_ markers. These templates are put in the .text section: they can't be
26 * discarded after boot as we may need to patch modules, too. */
27.text
28#define LGUEST_PATCH(name, insns...) \
29 lgstart_##name: insns; lgend_##name:; \
30 .globl lgstart_##name; .globl lgend_##name
31
32LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
33LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
34LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
35LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
36/*:*/
37
38/* These demarcate the EIP range where the Host should never deliver interrupts. */
39.global lguest_noirq_start
40.global lguest_noirq_end
41
42/*M:004 When the Host reflects a trap or injects an interrupt into the Guest,
43 * it sets the eflags interrupt bit on the stack based on
44 * lguest_data.irq_enabled, so the Guest iret logic does the right thing when
45 * restoring it. However, when the Host sets the Guest up for direct traps,
46 * such as system calls, the processor is the one to push eflags onto the
47 * stack, and the interrupt bit will be 1 (in reality, interrupts are always
48 * enabled in the Guest).
49 *
50 * This turns out to be harmless: the only trap which should happen under Linux
51 * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc
52 * regions), which has to be reflected through the Host anyway. If another
53 * trap *does* go off when interrupts are disabled, the Guest will panic, and
54 * we'll never get to this iret! :*/
55
56/*G:045 There is one final paravirt_op that the Guest implements, and glancing
57 * at it you can see why I left it to last. It's *cool*! It's in *assembler*!
58 *
59 * The "iret" instruction is used to return from an interrupt or trap. The
60 * stack looks like this:
61 * old address
62 * old code segment & privilege level
63 * old processor flags ("eflags")
64 *
65 * The "iret" instruction pops those values off the stack and restores them all
66 * at once. The only problem is that eflags includes the Interrupt Flag which
67 * the Guest can't change: the CPU will simply ignore it when we do an "iret".
68 * So we have to copy eflags from the stack to lguest_data.irq_enabled before
69 * we do the "iret".
70 *
71 * There are two problems with this: firstly, we need to use a register to do
72 * the copy and secondly, the whole thing needs to be atomic. The first
73 * problem is easy to solve: push %eax on the stack so we can use it, and then
74 * restore it at the end just before the real "iret".
75 *
76 * The second is harder: copying eflags to lguest_data.irq_enabled will turn
77 * interrupts on before we're finished, so we could be interrupted before we
78 * return to userspace or wherever. Our solution to this is to surround the
79 * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
80 * Host that it is *never* to interrupt us there, even if interrupts seem to be
81 * enabled. */
82ENTRY(lguest_iret)
83 pushl %eax
84 movl 12(%esp), %eax
85lguest_noirq_start:
86 /* Note the %ss: segment prefix here. Normal data accesses use the
87 * "ds" segment, but that will have already been restored for whatever
88 * we're returning to (such as userspace): we can't trust it. The %ss:
89 * prefix makes sure we use the stack segment, which is still valid. */
90 movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
91 popl %eax
92 iret
93lguest_noirq_end:
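
To make the stack gymnastics above concrete, here is an illustrative view of
the frame lguest_iret sees once it has pushed %eax (32-bit offsets; the
"movl 12(%esp), %eax" is fetching eflags). This struct is a reading aid for
the assembler, not a kernel type:

	struct iret_frame {
		unsigned long saved_eax;	/*  0(%esp): pushed above */
		unsigned long eip;		/*  4(%esp): return address */
		unsigned long cs;		/*  8(%esp): code seg + privilege */
		unsigned long eflags;		/* 12(%esp): copied to irq_enabled */
	};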
diff --git a/drivers/lguest/lguest_bus.c b/drivers/lguest/lguest_bus.c
deleted file mode 100644
index 57329788f8a7..000000000000
--- a/drivers/lguest/lguest_bus.c
+++ /dev/null
@@ -1,218 +0,0 @@
1/*P:050 Lguest guests use a very simple bus for devices. It's a simple array
2 * of device descriptors contained just above the top of normal memory. The
3 * lguest bus is 80% tedious boilerplate code. :*/
4#include <linux/init.h>
5#include <linux/bootmem.h>
6#include <linux/lguest_bus.h>
7#include <asm/io.h>
8#include <asm/paravirt.h>
9
10static ssize_t type_show(struct device *_dev,
11 struct device_attribute *attr, char *buf)
12{
13 struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
14 return sprintf(buf, "%hu", lguest_devices[dev->index].type);
15}
16static ssize_t features_show(struct device *_dev,
17 struct device_attribute *attr, char *buf)
18{
19 struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
20 return sprintf(buf, "%hx", lguest_devices[dev->index].features);
21}
22static ssize_t pfn_show(struct device *_dev,
23 struct device_attribute *attr, char *buf)
24{
25 struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
26 return sprintf(buf, "%u", lguest_devices[dev->index].pfn);
27}
28static ssize_t status_show(struct device *_dev,
29 struct device_attribute *attr, char *buf)
30{
31 struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
32 return sprintf(buf, "%hx", lguest_devices[dev->index].status);
33}
34static ssize_t status_store(struct device *_dev, struct device_attribute *attr,
35 const char *buf, size_t count)
36{
37 struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
38 if (sscanf(buf, "%hi", &lguest_devices[dev->index].status) != 1)
39 return -EINVAL;
40 return count;
41}
42static struct device_attribute lguest_dev_attrs[] = {
43 __ATTR_RO(type),
44 __ATTR_RO(features),
45 __ATTR_RO(pfn),
46 __ATTR(status, 0644, status_show, status_store),
47 __ATTR_NULL
48};
49
50/*D:130 The generic bus infrastructure requires a function which says whether a
51 * device matches a driver. For us, it is simple: "struct lguest_driver"
52 * contains a "device_type" field which indicates what type of device it can
53 * handle, so we just cast the args and compare: */
54static int lguest_dev_match(struct device *_dev, struct device_driver *_drv)
55{
56 struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
57 struct lguest_driver *drv = container_of(_drv,struct lguest_driver,drv);
58
59 return (drv->device_type == lguest_devices[dev->index].type);
60}
61/*:*/
62
63struct lguest_bus {
64 struct bus_type bus;
65 struct device dev;
66};
67
68static struct lguest_bus lguest_bus = {
69 .bus = {
70 .name = "lguest",
71 .match = lguest_dev_match,
72 .dev_attrs = lguest_dev_attrs,
73 },
74 .dev = {
75 .parent = NULL,
76 .bus_id = "lguest",
77 }
78};
79
80/*D:140 This is the callback which occurs once the bus infrastructure matches
81 * up a device and driver, ie. in response to add_lguest_device() calling
82 * device_register(), or register_lguest_driver() calling driver_register().
83 *
84 * At the moment it's always the latter: the devices are added first, since
85 * scan_devices() is called from a "core_initcall", and the drivers themselves
86 * called later as a normal "initcall". But it would work the other way too.
87 *
88 * So now we have the happy couple, we add the status bit to indicate that we
89 * found a driver. If the driver truly loves the device, it will return
90 * happiness from its probe function (ok, perhaps this wasn't my greatest
91 * analogy), and we set the final "driver ok" bit so the Host sees it's all
92 * green. */
93static int lguest_dev_probe(struct device *_dev)
94{
95 int ret;
96 struct lguest_device*dev = container_of(_dev,struct lguest_device,dev);
97 struct lguest_driver*drv = container_of(dev->dev.driver,
98 struct lguest_driver, drv);
99
100 lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER;
101 ret = drv->probe(dev);
102 if (ret == 0)
103 lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER_OK;
104 return ret;
105}
106
107/* The last part of the bus infrastructure is the function lguest drivers use
108 * to register themselves. Firstly, we do nothing if there's no lguest bus
109 * (ie. this is not a Guest), otherwise we fill in the embedded generic "struct
110 * driver" fields and call the generic driver_register(). */
111int register_lguest_driver(struct lguest_driver *drv)
112{
113 if (!lguest_devices)
114 return 0;
115
116 drv->drv.bus = &lguest_bus.bus;
117 drv->drv.name = drv->name;
118 drv->drv.owner = drv->owner;
119 drv->drv.probe = lguest_dev_probe;
120
121 return driver_register(&drv->drv);
122}
123
124/* At the moment we build all the drivers into the kernel because they're so
125 * simple: 8144 bytes for all three of them as I type this. And as the console
126 * really needs to be built in, it's actually only 3527 bytes for the network
127 * and block drivers.
128 *
129 * If they get complex it will make sense for them to be modularized, so we
130 * need to explicitly export the symbol.
131 *
132 * I don't think non-GPL modules make sense, so it's a GPL-only export.
133 */
134EXPORT_SYMBOL_GPL(register_lguest_driver);
135
136/*D:120 This is the core of the lguest bus: actually adding a new device.
137 * It's a separate function because it's neater that way, and because an
138 * earlier version of the code supported hotplug and unplug. They were removed
139 * early on because they were never used.
140 *
141 * As Andrew Tridgell says, "Untested code is buggy code".
142 *
143 * It's worth reading this carefully: we start with an index into the array of
144 * "struct lguest_device_desc"s indicating the device which is new: */
145static void add_lguest_device(unsigned int index)
146{
147 struct lguest_device *new;
148
149 /* Each "struct lguest_device_desc" has a "status" field, which the
150 * Guest updates as the device is probed. In the worst case, the Host
151 * can look at these bits to tell what part of device setup failed,
152 * even if the console isn't available. */
153 lguest_devices[index].status |= LGUEST_DEVICE_S_ACKNOWLEDGE;
154 new = kmalloc(sizeof(struct lguest_device), GFP_KERNEL);
155 if (!new) {
156 printk(KERN_EMERG "Cannot allocate lguest device %u\n", index);
157 lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
158 return;
159 }
160
161 /* The "struct lguest_device" setup is pretty straight-forward example
162 * code. */
163 new->index = index;
164 new->private = NULL;
165 memset(&new->dev, 0, sizeof(new->dev));
166 new->dev.parent = &lguest_bus.dev;
167 new->dev.bus = &lguest_bus.bus;
168 sprintf(new->dev.bus_id, "%u", index);
169
170 /* device_register() causes the bus infrastructure to look for a
171 * matching driver. */
172 if (device_register(&new->dev) != 0) {
173 printk(KERN_EMERG "Cannot register lguest device %u\n", index);
174 lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
175 kfree(new);
176 }
177}
178
179/*D:110 scan_devices() simply iterates through the device array. The type 0
180 * is reserved to mean "no device", and anything else means we have found a
181 * device: add it. */
182static void scan_devices(void)
183{
184 unsigned int i;
185
186 for (i = 0; i < LGUEST_MAX_DEVICES; i++)
187 if (lguest_devices[i].type)
188 add_lguest_device(i);
189}
190
191/*D:100 Fairly early in boot, lguest_bus_init() is called to set up the lguest
192 * bus. We check that we are a Guest by checking paravirt_ops.name: there are
193 * other ways of checking, but this seems most obvious to me.
194 *
195 * So we can access the array of "struct lguest_device_desc"s easily, we map
196 * that memory and store the pointer in the global "lguest_devices". Then we
197 * register the bus with the core. Doing two registrations seems clunky to me,
198 * but it seems to be the correct sysfs incantation.
199 *
200 * Finally we call scan_devices() which adds all the devices found in the
201 * "struct lguest_device_desc" array. */
202static int __init lguest_bus_init(void)
203{
204 if (strcmp(pv_info.name, "lguest") != 0)
205 return 0;
206
207 /* Devices are in a single page above top of "normal" mem */
208 lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
209
210 if (bus_register(&lguest_bus.bus) != 0
211 || device_register(&lguest_bus.dev) != 0)
212 panic("lguest bus registration failed");
213
214 scan_devices();
215 return 0;
216}
217/* Do this after core stuff, before devices. */
218postcore_initcall(lguest_bus_init);
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
new file mode 100644
index 000000000000..71c64837b437
--- /dev/null
+++ b/drivers/lguest/lguest_device.c
@@ -0,0 +1,373 @@
1/*P:050 Lguest guests use a very simple method to describe devices. It's a
2 * series of device descriptors contained just above the top of normal
3 * memory.
4 *
5 * We use the standard "virtio" device infrastructure, which provides us with a
6 * console, a network and a block driver. Each one expects some configuration
7 * information and a "virtqueue" mechanism to send and receive data. :*/
8#include <linux/init.h>
9#include <linux/bootmem.h>
10#include <linux/lguest_launcher.h>
11#include <linux/virtio.h>
12#include <linux/virtio_config.h>
13#include <linux/interrupt.h>
14#include <linux/virtio_ring.h>
15#include <linux/err.h>
16#include <asm/io.h>
17#include <asm/paravirt.h>
18#include <asm/lguest_hcall.h>
19
20/* The pointer to our (page) of device descriptions. */
21static void *lguest_devices;
22
23/* Unique numbering for lguest devices. */
24static unsigned int dev_index;
25
26/* For Guests, device memory can be used as normal memory, so we cast away the
27 * __iomem to quieten sparse. */
28static inline void *lguest_map(unsigned long phys_addr, unsigned long pages)
29{
30 return (__force void *)ioremap(phys_addr, PAGE_SIZE*pages);
31}
32
33static inline void lguest_unmap(void *addr)
34{
35 iounmap((__force void __iomem *)addr);
36}
37
38/*D:100 Each lguest device is just a virtio device plus a pointer to its entry
39 * in the lguest_devices page. */
40struct lguest_device {
41 struct virtio_device vdev;
42
43 /* The entry in the lguest_devices page for this device. */
44 struct lguest_device_desc *desc;
45};
46
47/* Since the virtio infrastructure hands us a pointer to the virtio_device all
48 * the time, it helps to have a curt macro to get a pointer to the struct
49 * lguest_device it's enclosed in. */
50#define to_lgdev(vdev) container_of(vdev, struct lguest_device, vdev)
51
52/*D:130
53 * Device configurations
54 *
55 * The configuration information for a device consists of a series of fields.
56 * The device will look for these fields during setup.
57 *
58 * For us these fields come immediately after that device's descriptor in the
59 * lguest_devices page.
60 *
61 * Each field starts with a "type" byte, a "length" byte, then that number of
62 * bytes of configuration information. The device descriptor tells us the
63 * total configuration length so we know when we've reached the last field. */
64
65/* type + length bytes */
66#define FHDR_LEN 2
67
68/* This finds the first field of a given type for a device's configuration. */
69static void *lg_find(struct virtio_device *vdev, u8 type, unsigned int *len)
70{
71 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
72 int i;
73
74 for (i = 0; i < desc->config_len; i += FHDR_LEN + desc->config[i+1]) {
75 if (desc->config[i] == type) {
76 /* Mark it used, so Host can know we looked at it, and
77 * also so we won't find the same one twice. */
78 desc->config[i] |= 0x80;
79 /* Remember, the second byte is the length. */
80 *len = desc->config[i+1];
81 /* We return a pointer to the field header. */
82 return desc->config + i;
83 }
84 }
85
86 /* Not found: return NULL for failure. */
87 return NULL;
88}
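
/* A sketch of the layout lg_find() is walking, with made-up field types:
 * each field is [type byte][length byte][payload], so the next field
 * always starts at i + 2 + config[i+1]. */
static const unsigned char example_config[] = {
	0x01, 2, 0xAB, 0xCD,	/* field type 1, two payload bytes */
	0x02, 1, 0x7F,		/* field type 2, one payload byte */
};

static const unsigned char *find_field(const unsigned char *cfg,
				       unsigned int len, unsigned char type)
{
	unsigned int i;

	for (i = 0; i + 2 <= len; i += 2 + cfg[i + 1])
		if (cfg[i] == type)
			return cfg + i;	/* pointer to the field header */
	return NULL;
}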
89
90/* Once they've found a field, getting a copy of it is easy. */
91static void lg_get(struct virtio_device *vdev, void *token,
92 void *buf, unsigned len)
93{
94 /* Check they didn't ask for more than the length of the field! */
95 BUG_ON(len > ((u8 *)token)[1]);
96 memcpy(buf, token + FHDR_LEN, len);
97}
98
99/* Setting the contents is also trivial. */
100static void lg_set(struct virtio_device *vdev, void *token,
101 const void *buf, unsigned len)
102{
103 BUG_ON(len > ((u8 *)token)[1]);
104 memcpy(token + FHDR_LEN, buf, len);
105}
106
107/* The operations to get and set the status word just access the status field
108 * of the device descriptor. */
109static u8 lg_get_status(struct virtio_device *vdev)
110{
111 return to_lgdev(vdev)->desc->status;
112}
113
114static void lg_set_status(struct virtio_device *vdev, u8 status)
115{
116 to_lgdev(vdev)->desc->status = status;
117}
118
119/*
120 * Virtqueues
121 *
122 * The other piece of infrastructure virtio needs is a "virtqueue": a way
123 * for the Guest device to register buffers for the other side to read from or
124 * write into (ie. send and receive buffers). Each device can have multiple
125 * virtqueues: for example the console has one queue for sending and one for
126 * receiving.
127 *
128 * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue
129 * already exists in virtio_ring.c. We just need to connect it up.
130 *
131 * We start with the information we need to keep about each virtqueue.
132 */
133
134/*D:140 This is the information we remember about each virtqueue. */
135struct lguest_vq_info
136{
137 /* A copy of the information contained in the device config. */
138 struct lguest_vqconfig config;
139
140 /* The address where we mapped the virtio ring, so we can unmap it. */
141 void *pages;
142};
143
144/* When the virtio_ring code wants to prod the Host, it calls us here and we
145 * make a hypercall. We hand the page number of the virtqueue so the Host
146 * knows which virtqueue we're talking about. */
147static void lg_notify(struct virtqueue *vq)
148{
149 /* We store our virtqueue information in the "priv" pointer of the
150 * virtqueue structure. */
151 struct lguest_vq_info *lvq = vq->priv;
152
153 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0);
154}
155
156/* This routine finds the first virtqueue described in the configuration of
157 * this device and sets it up.
158 *
159 * This is kind of an ugly duckling. It'd be nicer to have a standard
160 * representation of a virtqueue in the configuration space, but it seems that
161 * everyone wants to do it differently. The KVM guys want the Guest to
162 * allocate its own pages and tell the Host where they are, but for lguest it's
163 * simpler for the Host to simply tell us where the pages are.
164 *
165 * So we provide devices with a "find virtqueue and set it up" function. */
166static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
167 bool (*callback)(struct virtqueue *vq))
168{
169 struct lguest_vq_info *lvq;
170 struct virtqueue *vq;
171 unsigned int len;
172 void *token;
173 int err;
174
175 /* Look for a field of the correct type to mark a virtqueue. Note that
176 * if this succeeds, then the type will be changed so it won't be found
177 * again, and future lg_find_vq() calls will find the next
178 * virtqueue (if any). */
179 token = vdev->config->find(vdev, VIRTIO_CONFIG_F_VIRTQUEUE, &len);
180 if (!token)
181 return ERR_PTR(-ENOENT);
182
183 lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
184 if (!lvq)
185 return ERR_PTR(-ENOMEM);
186
187 /* Note: we could use a configuration space inside here, just like we
188 * do for the device. This would allow expansion in future, because
189 * our configuration system is designed to be expansible. But this is
190 * way easier. */
191 if (len != sizeof(lvq->config)) {
192 dev_err(&vdev->dev, "Unexpected virtio config len %u\n", len);
193 err = -EIO;
194 goto free_lvq;
195 }
196 /* Make a copy of the "struct lguest_vqconfig" field. We need a copy
197 * because the config space might not be aligned correctly. */
198 vdev->config->get(vdev, token, &lvq->config, sizeof(lvq->config));
199
200 /* Figure out how many pages the ring will take, and map that memory */
201 lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
202 DIV_ROUND_UP(vring_size(lvq->config.num),
203 PAGE_SIZE));
204 if (!lvq->pages) {
205 err = -ENOMEM;
206 goto free_lvq;
207 }
208
209 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size
210 * and we've got a pointer to its pages. */
211 vq = vring_new_virtqueue(lvq->config.num, vdev, lvq->pages,
212 lg_notify, callback);
213 if (!vq) {
214 err = -ENOMEM;
215 goto unmap;
216 }
217
218 /* Tell the interrupt for this virtqueue to go to the virtio_ring
219 * interrupt handler. */
220 /* FIXME: We used to have a flag for the Host to tell us we could use
221 * the interrupt as a source of randomness: it'd be nice to have that
222	 * back. */
223 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
224 vdev->dev.bus_id, vq);
225 if (err)
226 goto destroy_vring;
227
228 /* Last of all we hook up our 'struct lguest_vq_info" to the
229 * virtqueue's priv pointer. */
230 vq->priv = lvq;
231 return vq;
232
233destroy_vring:
234 vring_del_virtqueue(vq);
235unmap:
236 lguest_unmap(lvq->pages);
237free_lvq:
238 kfree(lvq);
239 return ERR_PTR(err);
240}
241/*:*/
242
243/* Cleaning up a virtqueue is easy */
244static void lg_del_vq(struct virtqueue *vq)
245{
246 struct lguest_vq_info *lvq = vq->priv;
247
248 /* Tell virtio_ring.c to free the virtqueue. */
249 vring_del_virtqueue(vq);
250 /* Unmap the pages containing the ring. */
251 lguest_unmap(lvq->pages);
252 /* Free our own queue information. */
253 kfree(lvq);
254}
255
256/* The ops structure which hooks everything together. */
257static struct virtio_config_ops lguest_config_ops = {
258 .find = lg_find,
259 .get = lg_get,
260 .set = lg_set,
261 .get_status = lg_get_status,
262 .set_status = lg_set_status,
263 .find_vq = lg_find_vq,
264 .del_vq = lg_del_vq,
265};
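
/* A sketch of the other side: roughly how a virtio driver's probe()
 * would consume these ops. "my_callback" and "my_probe" are placeholder
 * names, and the error handling is trimmed. */
static bool my_callback(struct virtqueue *vq)
{
	/* ... service the buffers the Host has finished with ... */
	return true;
}

static int my_probe(struct virtio_device *vdev)
{
	struct virtqueue *vq = vdev->config->find_vq(vdev, my_callback);

	if (IS_ERR(vq))
		return PTR_ERR(vq);
	/* ... register buffers via the vring helpers, then kick ... */
	return 0;
}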
266
267/* The root device for the lguest virtio devices. This makes them appear as
268 * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */
269static struct device lguest_root = {
270 .parent = NULL,
271 .bus_id = "lguest",
272};
273
274/*D:120 This is the core of the lguest bus: actually adding a new device.
275 * It's a separate function because it's neater that way, and because an
276 * earlier version of the code supported hotplug and unplug. They were removed
277 * early on because they were never used.
278 *
279 * As Andrew Tridgell says, "Untested code is buggy code".
280 *
281 * It's worth reading this carefully: we start with a pointer to the new device
282 * descriptor in the "lguest_devices" page. */
283static void add_lguest_device(struct lguest_device_desc *d)
284{
285 struct lguest_device *ldev;
286
287 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
288 if (!ldev) {
289 printk(KERN_EMERG "Cannot allocate lguest dev %u\n",
290 dev_index++);
291 return;
292 }
293
294	/* This device's parent is the lguest/ dir. */
295 ldev->vdev.dev.parent = &lguest_root;
296 /* We have a unique device index thanks to the dev_index counter. */
297 ldev->vdev.index = dev_index++;
298 /* The device type comes straight from the descriptor. There's also a
299 * device vendor field in the virtio_device struct, which we leave as
300 * 0. */
301 ldev->vdev.id.device = d->type;
302 /* We have a simple set of routines for querying the device's
303 * configuration information and setting its status. */
304 ldev->vdev.config = &lguest_config_ops;
305 /* And we remember the device's descriptor for lguest_config_ops. */
306 ldev->desc = d;
307
308 /* register_virtio_device() sets up the generic fields for the struct
309 * virtio_device and calls device_register(). This makes the bus
310 * infrastructure look for a matching driver. */
311 if (register_virtio_device(&ldev->vdev) != 0) {
312 printk(KERN_ERR "Failed to register lguest device %u\n",
313 ldev->vdev.index);
314 kfree(ldev);
315 }
316}
317
318/*D:110 scan_devices() simply iterates through the device page. The type 0 is
319 * reserved to mean "end of devices". */
320static void scan_devices(void)
321{
322 unsigned int i;
323 struct lguest_device_desc *d;
324
325 /* We start at the page beginning, and skip over each entry. */
326 for (i = 0; i < PAGE_SIZE; i += sizeof(*d) + d->config_len) {
327 d = lguest_devices + i;
328
329 /* Once we hit a zero, stop. */
330 if (d->type == 0)
331 break;
332
333 add_lguest_device(d);
334 }
335}
336
337/*D:105 Fairly early in boot, lguest_devices_init() is called to set up the
338 * lguest device infrastructure. We check that we are a Guest by checking
339 * pv_info.name: there are other ways of checking, but this seems most
340 * obvious to me.
341 *
342 * So we can access the "struct lguest_device_desc"s easily, we map that memory
343 * and store the pointer in the global "lguest_devices". Then we register a
344 * root device from which all our devices will hang (this seems to be the
345 * correct sysfs incantation).
346 *
347 * Finally we call scan_devices() which adds all the devices found in the
348 * lguest_devices page. */
349static int __init lguest_devices_init(void)
350{
351 if (strcmp(pv_info.name, "lguest") != 0)
352 return 0;
353
354 if (device_register(&lguest_root) != 0)
355 panic("Could not register lguest root");
356
357 /* Devices are in a single page above top of "normal" mem */
358 lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
359
360 scan_devices();
361 return 0;
362}
363/* We do this after core stuff, but before the drivers. */
364postcore_initcall(lguest_devices_init);
365
366/*D:150 At this point in the journey we used to wade through the lguest
367 * devices themselves: net, block and console. Since they're all now virtio
368 * devices rather than lguest-specific, I've decided to ignore them. Mostly,
369 * they're kind of boring. But this does mean you'll never experience the
370 * thrill of reading the forbidden love scene buried deep in the block driver.
371 *
372 * "make Launcher" beckons, where we answer questions like "Where do Guests
373 * come from?", and "What do you do when someone asks for optimization?". */
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 80d1b58c7698..ee405b38383d 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -1,73 +1,17 @@
1/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher 1/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
2 * controls and communicates with the Guest. For example, the first write will 2 * controls and communicates with the Guest. For example, the first write will
3 * tell us the memory size, pagetable, entry point and kernel address offset. 3 * tell us the Guest's memory layout, pagetable, entry point and kernel address
4 * A read will run the Guest until a signal is pending (-EINTR), or the Guest 4 * offset. A read will run the Guest until something happens, such as a signal
5 * does a DMA out to the Launcher. Writes are also used to get a DMA buffer 5 * or the Guest doing a NOTIFY out to the Launcher. :*/
6 * registered by the Guest and to send the Guest an interrupt. :*/
7#include <linux/uaccess.h> 6#include <linux/uaccess.h>
8#include <linux/miscdevice.h> 7#include <linux/miscdevice.h>
9#include <linux/fs.h> 8#include <linux/fs.h>
10#include "lg.h" 9#include "lg.h"
11 10
12/*L:030 setup_regs() doesn't really belong in this file, but it gives us an
13 * early glimpse deeper into the Host so it's worth having here.
14 *
15 * Most of the Guest's registers are left alone: we used get_zeroed_page() to
16 * allocate the structure, so they will be 0. */
17static void setup_regs(struct lguest_regs *regs, unsigned long start)
18{
19 /* There are four "segment" registers which the Guest needs to boot:
20 * The "code segment" register (cs) refers to the kernel code segment
21 * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
22 * refer to the kernel data segment __KERNEL_DS.
23 *
24 * The privilege level is packed into the lower bits. The Guest runs
25 * at privilege level 1 (GUEST_PL).*/
26 regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
27 regs->cs = __KERNEL_CS|GUEST_PL;
28
29 /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
30 * is supposed to always be "1". Bit 9 (0x200) controls whether
31 * interrupts are enabled. We always leave interrupts enabled while
32 * running the Guest. */
33 regs->eflags = 0x202;
34
35 /* The "Extended Instruction Pointer" register says where the Guest is
36 * running. */
37 regs->eip = start;
38
39 /* %esi points to our boot information, at physical address 0, so don't
40 * touch it. */
41}
42
43/*L:310 To send DMA into the Guest, the Launcher needs to be able to ask for a
44 * DMA buffer. This is done by writing LHREQ_GETDMA and the key to
45 * /dev/lguest. */
46static long user_get_dma(struct lguest *lg, const u32 __user *input)
47{
48 unsigned long key, udma, irq;
49
50 /* Fetch the key they wrote to us. */
51 if (get_user(key, input) != 0)
52 return -EFAULT;
53 /* Look for a free Guest DMA buffer bound to that key. */
54 udma = get_dma_buffer(lg, key, &irq);
55 if (!udma)
56 return -ENOENT;
57
58 /* We need to tell the Launcher what interrupt the Guest expects after
59 * the buffer is filled. We stash it in udma->used_len. */
60 lgwrite_u32(lg, udma + offsetof(struct lguest_dma, used_len), irq);
61
62 /* The (guest-physical) address of the DMA buffer is returned from
63 * the write(). */
64 return udma;
65}
66
67/*L:315 To force the Guest to stop running and return to the Launcher, the 11/*L:315 To force the Guest to stop running and return to the Launcher, the
68 * Waker writes LHREQ_BREAK and the value "1" to /dev/lguest. The 12 * Waker writes LHREQ_BREAK and the value "1" to /dev/lguest. The
69 * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */ 13 * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */
70static int break_guest_out(struct lguest *lg, const u32 __user *input) 14static int break_guest_out(struct lguest *lg, const unsigned long __user *input)
71{ 15{
72 unsigned long on; 16 unsigned long on;
73 17
@@ -90,9 +34,9 @@ static int break_guest_out(struct lguest *lg, const u32 __user *input)
90 34
91/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt 35/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
92 * number to /dev/lguest. */ 36 * number to /dev/lguest. */
93static int user_send_irq(struct lguest *lg, const u32 __user *input) 37static int user_send_irq(struct lguest *lg, const unsigned long __user *input)
94{ 38{
95 u32 irq; 39 unsigned long irq;
96 40
97 if (get_user(irq, input) != 0) 41 if (get_user(irq, input) != 0)
98 return -EFAULT; 42 return -EFAULT;
@@ -133,17 +77,19 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
133 return len; 77 return len;
134 } 78 }
135 79
136 /* If we returned from read() last time because the Guest sent DMA, 80 /* If we returned from read() last time because the Guest notified,
137 * clear the flag. */ 81 * clear the flag. */
138 if (lg->dma_is_pending) 82 if (lg->pending_notify)
139 lg->dma_is_pending = 0; 83 lg->pending_notify = 0;
140 84
141 /* Run the Guest until something interesting happens. */ 85 /* Run the Guest until something interesting happens. */
142 return run_guest(lg, (unsigned long __user *)user); 86 return run_guest(lg, (unsigned long __user *)user);
143} 87}
144 88
145/*L:020 The initialization write supplies 4 32-bit values (in addition to the 89/*L:020 The initialization write supplies 4 pointer sized (32 or 64 bit)
146 * 32-bit LHREQ_INITIALIZE value). These are: 90 * values (in addition to the LHREQ_INITIALIZE value). These are:
91 *
92 * base: The start of the Guest-physical memory inside the Launcher memory.
147 * 93 *
148 * pfnlimit: The highest (Guest-physical) page number the Guest should be 94 * pfnlimit: The highest (Guest-physical) page number the Guest should be
149 * allowed to access. The Launcher has to live in Guest memory, so it sets 95 * allowed to access. The Launcher has to live in Guest memory, so it sets
@@ -153,23 +99,17 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
153 * pagetables (which are set up by the Launcher). 99 * pagetables (which are set up by the Launcher).
154 * 100 *
155 * start: The first instruction to execute ("eip" in x86-speak). 101 * start: The first instruction to execute ("eip" in x86-speak).
156 *
157 * page_offset: The PAGE_OFFSET constant in the Guest kernel. We should
158 * probably wean the code off this, but it's a very useful constant! Any
159 * address above this is within the Guest kernel, and any kernel address can
160 * quickly converted from physical to virtual by adding PAGE_OFFSET. It's
161 * 0xC0000000 (3G) by default, but it's configurable at kernel build time.
162 */ 102 */
163static int initialize(struct file *file, const u32 __user *input) 103static int initialize(struct file *file, const unsigned long __user *input)
164{ 104{
165 /* "struct lguest" contains everything we (the Host) know about a 105 /* "struct lguest" contains everything we (the Host) know about a
166 * Guest. */ 106 * Guest. */
167 struct lguest *lg; 107 struct lguest *lg;
168 int err, i; 108 int err;
169 u32 args[4]; 109 unsigned long args[4];
170 110
171 /* We grab the Big Lguest lock, which protects the global array 111 /* We grab the Big Lguest lock, which protects against multiple
172 * "lguests" and multiple simultaneous initializations. */ 112 * simultaneous initializations. */
173 mutex_lock(&lguest_lock); 113 mutex_lock(&lguest_lock);
174 /* You can't initialize twice! Close the device and start again... */ 114 /* You can't initialize twice! Close the device and start again... */
175 if (file->private_data) { 115 if (file->private_data) {
@@ -182,20 +122,15 @@ static int initialize(struct file *file, const u32 __user *input)
182 goto unlock; 122 goto unlock;
183 } 123 }
184 124
185 /* Find an unused guest. */ 125 lg = kzalloc(sizeof(*lg), GFP_KERNEL);
186 i = find_free_guest(); 126 if (!lg) {
187 if (i < 0) { 127 err = -ENOMEM;
188 err = -ENOSPC;
189 goto unlock; 128 goto unlock;
190 } 129 }
191 /* OK, we have an index into the "lguest" array: "lg" is a convenient
192 * pointer. */
193 lg = &lguests[i];
194 130
195 /* Populate the easy fields of our "struct lguest" */ 131 /* Populate the easy fields of our "struct lguest" */
196 lg->guestid = i; 132 lg->mem_base = (void __user *)(long)args[0];
197 lg->pfn_limit = args[0]; 133 lg->pfn_limit = args[1];
198 lg->page_offset = args[3];
199 134
200 /* We need a complete page for the Guest registers: they are accessible 135 /* We need a complete page for the Guest registers: they are accessible
201 * to the Guest and we can only grant it access to whole pages. */ 136 * to the Guest and we can only grant it access to whole pages. */
@@ -210,17 +145,13 @@ static int initialize(struct file *file, const u32 __user *input)
210 /* Initialize the Guest's shadow page tables, using the toplevel 145 /* Initialize the Guest's shadow page tables, using the toplevel
211 * address the Launcher gave us. This allocates memory, so can 146 * address the Launcher gave us. This allocates memory, so can
212 * fail. */ 147 * fail. */
213 err = init_guest_pagetable(lg, args[1]); 148 err = init_guest_pagetable(lg, args[2]);
214 if (err) 149 if (err)
215 goto free_regs; 150 goto free_regs;
216 151
217 /* Now we initialize the Guest's registers, handing it the start 152 /* Now we initialize the Guest's registers, handing it the start
218 * address. */ 153 * address. */
219 setup_regs(lg->regs, args[2]); 154 lguest_arch_setup_regs(lg, args[3]);
220
221 /* There are a couple of GDT entries the Guest expects when first
222 * booting. */
223 setup_guest_gdt(lg);
224 155
225 /* The timer for lguest's clock needs initialization. */ 156 /* The timer for lguest's clock needs initialization. */
226 init_clockdev(lg); 157 init_clockdev(lg);
@@ -260,18 +191,19 @@ unlock:
260/*L:010 The first operation the Launcher does must be a write. All writes 191/*L:010 The first operation the Launcher does must be a write. All writes
261 * start with a 32 bit number: for the first write this must be 192 * start with a 32 bit number: for the first write this must be
262 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use 193 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
263 * writes of other values to get DMA buffers and send interrupts. */ 194 * writes of other values to send interrupts. */
264static ssize_t write(struct file *file, const char __user *input, 195static ssize_t write(struct file *file, const char __user *in,
265 size_t size, loff_t *off) 196 size_t size, loff_t *off)
266{ 197{
267 /* Once the guest is initialized, we hold the "struct lguest" in the 198 /* Once the guest is initialized, we hold the "struct lguest" in the
268 * file private data. */ 199 * file private data. */
269 struct lguest *lg = file->private_data; 200 struct lguest *lg = file->private_data;
270 u32 req; 201 const unsigned long __user *input = (const unsigned long __user *)in;
202 unsigned long req;
271 203
272 if (get_user(req, input) != 0) 204 if (get_user(req, input) != 0)
273 return -EFAULT; 205 return -EFAULT;
274 input += sizeof(req); 206 input++;
275 207
276 /* If you haven't initialized, you must do that first. */ 208 /* If you haven't initialized, you must do that first. */
277 if (req != LHREQ_INITIALIZE && !lg) 209 if (req != LHREQ_INITIALIZE && !lg)
@@ -287,13 +219,11 @@ static ssize_t write(struct file *file, const char __user *input,
287 219
288 switch (req) { 220 switch (req) {
289 case LHREQ_INITIALIZE: 221 case LHREQ_INITIALIZE:
290 return initialize(file, (const u32 __user *)input); 222 return initialize(file, input);
291 case LHREQ_GETDMA:
292 return user_get_dma(lg, (const u32 __user *)input);
293 case LHREQ_IRQ: 223 case LHREQ_IRQ:
294 return user_send_irq(lg, (const u32 __user *)input); 224 return user_send_irq(lg, input);
295 case LHREQ_BREAK: 225 case LHREQ_BREAK:
296 return break_guest_out(lg, (const u32 __user *)input); 226 return break_guest_out(lg, input);
297 default: 227 default:
298 return -EINVAL; 228 return -EINVAL;
299 } 229 }
@@ -319,8 +249,6 @@ static int close(struct inode *inode, struct file *file)
319 mutex_lock(&lguest_lock); 249 mutex_lock(&lguest_lock);
320 /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */ 250 /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
321 hrtimer_cancel(&lg->hrt); 251 hrtimer_cancel(&lg->hrt);
322 /* Free any DMA buffers the Guest had bound. */
323 release_all_dma(lg);
324 /* Free up the shadow page tables for the Guest. */ 252 /* Free up the shadow page tables for the Guest. */
325 free_guest_pagetable(lg); 253 free_guest_pagetable(lg);
326 /* Now all the memory cleanups are done, it's safe to release the 254 /* Now all the memory cleanups are done, it's safe to release the
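
For context, the Launcher's half of this interface now looks roughly like the
sketch below: requests are written as unsigned longs, so one binary layout
serves 32-bit and 64-bit builds alike. The LHREQ_IRQ value here is
illustrative; the real numbers live in linux/lguest_launcher.h.

	#include <unistd.h>

	#define LHREQ_IRQ 2	/* illustrative; see linux/lguest_launcher.h */

	static int send_guest_irq(int lguest_fd, unsigned long irq)
	{
		unsigned long buf[2] = { LHREQ_IRQ, irq };

		if (write(lguest_fd, buf, sizeof(buf)) != sizeof(buf))
			return -1;
		return 0;
	}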
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index b7a924ace684..2a45f0691c9b 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -13,6 +13,7 @@
13#include <linux/random.h> 13#include <linux/random.h>
14#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
16#include <asm/uaccess.h>
16#include "lg.h" 17#include "lg.h"
17 18
18/*M:008 We hold reference to pages, which prevents them from being swapped. 19/*M:008 We hold reference to pages, which prevents them from being swapped.
@@ -44,44 +45,32 @@
44 * (vii) Setting up the page tables initially. 45 * (vii) Setting up the page tables initially.
45 :*/ 46 :*/
46 47
47/* Pages are 4k long, and each page table entry is 4 bytes long, giving us 1024
48 * (or 2^10) entries per page. */
49#define PTES_PER_PAGE_SHIFT 10
50#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
51 48
52/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is 49/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is
53 * conveniently placed at the top 4MB, so it uses a separate, complete PTE 50 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
54 * page. */ 51 * page. */
55#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1) 52#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
56 53
57/* We actually need a separate PTE page for each CPU. Remember that after the 54/* We actually need a separate PTE page for each CPU. Remember that after the
58 * Switcher code itself comes two pages for each CPU, and we don't want this 55 * Switcher code itself comes two pages for each CPU, and we don't want this
59 * CPU's guest to see the pages of any other CPU. */ 56 * CPU's guest to see the pages of any other CPU. */
60static DEFINE_PER_CPU(spte_t *, switcher_pte_pages); 57static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
61#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) 58#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
62 59
63/*H:320 With our shadow and Guest types established, we need to deal with 60/*H:320 With our shadow and Guest types established, we need to deal with
64 * them: the page table code is curly enough to need helper functions to keep 61 * them: the page table code is curly enough to need helper functions to keep
65 * it clear and clean. 62 * it clear and clean.
66 * 63 *
67 * The first helper takes a virtual address, and says which entry in the top 64 * There are two functions which return pointers to the shadow (aka "real")
68 * level page table deals with that address. Since each top level entry deals
69 * with 4M, this effectively divides by 4M. */
70static unsigned vaddr_to_pgd_index(unsigned long vaddr)
71{
72 return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
73}
74
75/* There are two functions which return pointers to the shadow (aka "real")
76 * page tables. 65 * page tables.
77 * 66 *
78 * spgd_addr() takes the virtual address and returns a pointer to the top-level 67 * spgd_addr() takes the virtual address and returns a pointer to the top-level
79 * page directory entry for that address. Since we keep track of several page 68 * page directory entry for that address. Since we keep track of several page
80 * tables, the "i" argument tells us which one we're interested in (it's 69 * tables, the "i" argument tells us which one we're interested in (it's
81 * usually the current one). */ 70 * usually the current one). */
82static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr) 71static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
83{ 72{
84 unsigned int index = vaddr_to_pgd_index(vaddr); 73 unsigned int index = pgd_index(vaddr);
85 74
86 /* We kill any Guest trying to touch the Switcher addresses. */ 75 /* We kill any Guest trying to touch the Switcher addresses. */
87 if (index >= SWITCHER_PGD_INDEX) { 76 if (index >= SWITCHER_PGD_INDEX) {
@@ -95,28 +84,28 @@ static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
95/* This routine then takes the PGD entry given above, which contains the 84/* This routine then takes the PGD entry given above, which contains the
96 * address of the PTE page. It then returns a pointer to the PTE entry for the 85 * address of the PTE page. It then returns a pointer to the PTE entry for the
97 * given address. */ 86 * given address. */
98static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr) 87static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
99{ 88{
100 spte_t *page = __va(spgd.pfn << PAGE_SHIFT); 89 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
101 /* You should never call this if the PGD entry wasn't valid */ 90 /* You should never call this if the PGD entry wasn't valid */
102 BUG_ON(!(spgd.flags & _PAGE_PRESENT)); 91 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
103 return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE]; 92 return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
104} 93}
105 94
106/* These two functions just like the above two, except they access the Guest 95/* These two functions just like the above two, except they access the Guest
107 * page tables. Hence they return a Guest address. */ 96 * page tables. Hence they return a Guest address. */
108static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr) 97static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
109{ 98{
110 unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT); 99 unsigned int index = vaddr >> (PGDIR_SHIFT);
111 return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t); 100 return lg->pgdirs[lg->pgdidx].gpgdir + index * sizeof(pgd_t);
112} 101}
113 102
114static unsigned long gpte_addr(struct lguest *lg, 103static unsigned long gpte_addr(struct lguest *lg,
115 gpgd_t gpgd, unsigned long vaddr) 104 pgd_t gpgd, unsigned long vaddr)
116{ 105{
117 unsigned long gpage = gpgd.pfn << PAGE_SHIFT; 106 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
118 BUG_ON(!(gpgd.flags & _PAGE_PRESENT)); 107 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
119 return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t); 108 return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
120} 109}
121 110
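
A quick worked example of the index arithmetic above, assuming i386 two-level paging (PGDIR_SHIFT = 22, PAGE_SHIFT = 12, PTRS_PER_PTE = 1024) and the hypothetical address 0xC0101234:

	pgd_index(0xC0101234)             = 0xC0101234 >> 22           = 0x300 (768)
	(0xC0101234 >> PAGE_SHIFT) % 1024 = 0xC0101 % 1024             = 0x101 (257)
	offset within the page            = 0xC0101234 & (PAGE_SIZE-1) = 0x234
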
122/*H:350 This routine takes a page number given by the Guest and converts it to 111/*H:350 This routine takes a page number given by the Guest and converts it to
@@ -149,53 +138,55 @@ static unsigned long get_pfn(unsigned long virtpfn, int write)
149 * entry can be a little tricky. The flags are (almost) the same, but the 138 * entry can be a little tricky. The flags are (almost) the same, but the
150 * Guest PTE contains a virtual page number: the CPU needs the real page 139 * Guest PTE contains a virtual page number: the CPU needs the real page
151 * number. */ 140 * number. */
152static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write) 141static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
153{ 142{
154 spte_t spte; 143 unsigned long pfn, base, flags;
155 unsigned long pfn;
156 144
157 /* The Guest sets the global flag, because it thinks that it is using 145 /* The Guest sets the global flag, because it thinks that it is using
158 * PGE. We only told it to use PGE so it would tell us whether it was 146 * PGE. We only told it to use PGE so it would tell us whether it was
159 * flushing a kernel mapping or a userspace mapping. We don't actually 147 * flushing a kernel mapping or a userspace mapping. We don't actually
160 * use the global bit, so throw it away. */ 148 * use the global bit, so throw it away. */
161 spte.flags = (gpte.flags & ~_PAGE_GLOBAL); 149 flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
150
151 /* The Guest's pages are offset inside the Launcher. */
152 base = (unsigned long)lg->mem_base / PAGE_SIZE;
162 153
163 /* We need a temporary "unsigned long" variable to hold the answer from 154 /* We need a temporary "unsigned long" variable to hold the answer from
164 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't 155 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
165 * fit in spte.pfn. get_pfn() finds the real physical number of the 156 * fit in spte.pfn. get_pfn() finds the real physical number of the
166 * page, given the virtual number. */ 157 * page, given the virtual number. */
167 pfn = get_pfn(gpte.pfn, write); 158 pfn = get_pfn(base + pte_pfn(gpte), write);
168 if (pfn == -1UL) { 159 if (pfn == -1UL) {
169 kill_guest(lg, "failed to get page %u", gpte.pfn); 160 kill_guest(lg, "failed to get page %lu", pte_pfn(gpte));
170 /* When we destroy the Guest, we'll go through the shadow page 161 /* When we destroy the Guest, we'll go through the shadow page
171 * tables and release_pte() them. Make sure we don't think 162 * tables and release_pte() them. Make sure we don't think
172 * this one is valid! */ 163 * this one is valid! */
173 spte.flags = 0; 164 flags = 0;
174 } 165 }
175 /* Now we assign the page number, and our shadow PTE is complete. */ 166 /* Now we assemble our shadow PTE from the page number and flags. */
176 spte.pfn = pfn; 167 return pfn_pte(pfn, __pgprot(flags));
177 return spte;
178} 168}
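
To make the mem_base offsetting concrete, a worked example with hypothetical numbers: suppose the Launcher mapped Guest memory at its virtual address 0x10000000 and the Guest PTE names pfn 5. Then:

	base         = 0x10000000 / PAGE_SIZE = 0x10000
	launcher pfn = base + pte_pfn(gpte)   = 0x10000 + 5 = 0x10005

so get_pfn() pins Launcher page 0x10005, and the shadow PTE ends up holding that page's real physical number.
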
179 169
180/*H:460 And to complete the chain, release_pte() looks like this: */ 170/*H:460 And to complete the chain, release_pte() looks like this: */
181static void release_pte(spte_t pte) 171static void release_pte(pte_t pte)
182{ 172{
183 /* Remember that get_user_pages() took a reference to the page, in 173 /* Remember that get_user_pages() took a reference to the page, in
184 * get_pfn()? We have to put it back now. */ 174 * get_pfn()? We have to put it back now. */
185 if (pte.flags & _PAGE_PRESENT) 175 if (pte_flags(pte) & _PAGE_PRESENT)
186 put_page(pfn_to_page(pte.pfn)); 176 put_page(pfn_to_page(pte_pfn(pte)));
187} 177}
188/*:*/ 178/*:*/
189 179
190static void check_gpte(struct lguest *lg, gpte_t gpte) 180static void check_gpte(struct lguest *lg, pte_t gpte)
191{ 181{
192 if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit) 182 if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE))
183 || pte_pfn(gpte) >= lg->pfn_limit)
193 kill_guest(lg, "bad page table entry"); 184 kill_guest(lg, "bad page table entry");
194} 185}
195 186
196static void check_gpgd(struct lguest *lg, gpgd_t gpgd) 187static void check_gpgd(struct lguest *lg, pgd_t gpgd)
197{ 188{
198 if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit) 189 if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || pgd_pfn(gpgd) >= lg->pfn_limit)
199 kill_guest(lg, "bad page directory entry"); 190 kill_guest(lg, "bad page directory entry");
200} 191}
201 192
@@ -211,21 +202,21 @@ static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
211 * true. */ 202 * true. */
212int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) 203int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
213{ 204{
214 gpgd_t gpgd; 205 pgd_t gpgd;
215 spgd_t *spgd; 206 pgd_t *spgd;
216 unsigned long gpte_ptr; 207 unsigned long gpte_ptr;
217 gpte_t gpte; 208 pte_t gpte;
218 spte_t *spte; 209 pte_t *spte;
219 210
220 /* First step: get the top-level Guest page table entry. */ 211 /* First step: get the top-level Guest page table entry. */
221 gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr))); 212 gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
222 /* Toplevel not present? We can't map it in. */ 213 /* Toplevel not present? We can't map it in. */
223 if (!(gpgd.flags & _PAGE_PRESENT)) 214 if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
224 return 0; 215 return 0;
225 216
226 /* Now look at the matching shadow entry. */ 217 /* Now look at the matching shadow entry. */
227 spgd = spgd_addr(lg, lg->pgdidx, vaddr); 218 spgd = spgd_addr(lg, lg->pgdidx, vaddr);
228 if (!(spgd->flags & _PAGE_PRESENT)) { 219 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
229 /* No shadow entry: allocate a new shadow PTE page. */ 220 /* No shadow entry: allocate a new shadow PTE page. */
230 unsigned long ptepage = get_zeroed_page(GFP_KERNEL); 221 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
231 /* This is not really the Guest's fault, but killing it is 222 /* This is not really the Guest's fault, but killing it is
@@ -238,34 +229,35 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
238 check_gpgd(lg, gpgd); 229 check_gpgd(lg, gpgd);
239 /* And we copy the flags to the shadow PGD entry. The page 230 /* And we copy the flags to the shadow PGD entry. The page
240 * number in the shadow PGD is the page we just allocated. */ 231 * number in the shadow PGD is the page we just allocated. */
241 spgd->raw.val = (__pa(ptepage) | gpgd.flags); 232 *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
242 } 233 }
243 234
244 /* OK, now we look at the lower level in the Guest page table: keep its 235 /* OK, now we look at the lower level in the Guest page table: keep its
245 * address, because we might update it later. */ 236 * address, because we might update it later. */
246 gpte_ptr = gpte_addr(lg, gpgd, vaddr); 237 gpte_ptr = gpte_addr(lg, gpgd, vaddr);
247 gpte = mkgpte(lgread_u32(lg, gpte_ptr)); 238 gpte = lgread(lg, gpte_ptr, pte_t);
248 239
249 /* If this page isn't in the Guest page tables, we can't page it in. */ 240 /* If this page isn't in the Guest page tables, we can't page it in. */
250 if (!(gpte.flags & _PAGE_PRESENT)) 241 if (!(pte_flags(gpte) & _PAGE_PRESENT))
251 return 0; 242 return 0;
252 243
253 /* Check they're not trying to write to a page the Guest wants 244 /* Check they're not trying to write to a page the Guest wants
254 * read-only (bit 2 of errcode == write). */ 245 * read-only (bit 2 of errcode == write). */
255 if ((errcode & 2) && !(gpte.flags & _PAGE_RW)) 246 if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
256 return 0; 247 return 0;
257 248
258 /* User access to a kernel page? (bit 3 == user access) */ 249 /* User access to a kernel page? (bit 3 == user access) */
259 if ((errcode & 4) && !(gpte.flags & _PAGE_USER)) 250 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
260 return 0; 251 return 0;
261 252
262 /* Check that the Guest PTE flags are OK, and the page number is below 253 /* Check that the Guest PTE flags are OK, and the page number is below
263 * the pfn_limit (ie. not mapping the Launcher binary). */ 254 * the pfn_limit (ie. not mapping the Launcher binary). */
264 check_gpte(lg, gpte); 255 check_gpte(lg, gpte);
265 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ 256 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
266 gpte.flags |= _PAGE_ACCESSED; 257 gpte = pte_mkyoung(gpte);
258
267 if (errcode & 2) 259 if (errcode & 2)
268 gpte.flags |= _PAGE_DIRTY; 260 gpte = pte_mkdirty(gpte);
269 261
270 /* Get the pointer to the shadow PTE entry we're going to set. */ 262 /* Get the pointer to the shadow PTE entry we're going to set. */
271 spte = spte_addr(lg, *spgd, vaddr); 263 spte = spte_addr(lg, *spgd, vaddr);
@@ -275,21 +267,18 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
275 267
276 /* If this is a write, we insist that the Guest page is writable (the 268 /* If this is a write, we insist that the Guest page is writable (the
277 * final arg to gpte_to_spte()). */ 269 * final arg to gpte_to_spte()). */
278 if (gpte.flags & _PAGE_DIRTY) 270 if (pte_dirty(gpte))
279 *spte = gpte_to_spte(lg, gpte, 1); 271 *spte = gpte_to_spte(lg, gpte, 1);
280 else { 272 else
281 /* If this is a read, don't set the "writable" bit in the page 273 /* If this is a read, don't set the "writable" bit in the page
282 * table entry, even if the Guest says it's writable. That way 274 * table entry, even if the Guest says it's writable. That way
283 * we come back here when a write does actually occur, so we can 275 * we come back here when a write does actually occur, so we can
284 * update the Guest's _PAGE_DIRTY flag. */ 276 * update the Guest's _PAGE_DIRTY flag. */
285 gpte_t ro_gpte = gpte; 277 *spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0);
286 ro_gpte.flags &= ~_PAGE_RW;
287 *spte = gpte_to_spte(lg, ro_gpte, 0);
288 }
289 278
290 /* Finally, we write the Guest PTE entry back: we've set the 279 /* Finally, we write the Guest PTE entry back: we've set the
291 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ 280 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
292 lgwrite_u32(lg, gpte_ptr, gpte.raw.val); 281 lgwrite(lg, gpte_ptr, pte_t, gpte);
293 282
294 /* We succeeded in mapping the page! */ 283 /* We succeeded in mapping the page! */
295 return 1; 284 return 1;
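
Since demand_page() leans on the x86 page-fault error code, a hedged reference sketch of the bits it tests (these are the standard architectural meanings, not lguest-specific definitions):

	/* bit 0 (1): the faulting page was present;
	 * bit 1 (2): the access was a write, hence "errcode & 2" above;
	 * bit 2 (4): the access came from user mode, hence "errcode & 4". */
	static inline int fault_was_write(unsigned long errcode)
	{
		return errcode & 2;
	}

	static inline int fault_was_user(unsigned long errcode)
	{
		return errcode & 4;
	}
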
@@ -305,17 +294,18 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
305 * mapped by the shadow page tables, and is it writable? */ 294 * mapped by the shadow page tables, and is it writable? */
306static int page_writable(struct lguest *lg, unsigned long vaddr) 295static int page_writable(struct lguest *lg, unsigned long vaddr)
307{ 296{
308 spgd_t *spgd; 297 pgd_t *spgd;
309 unsigned long flags; 298 unsigned long flags;
310 299
311 /* Look at the top level entry: is it present? */ 300 /* Look at the top level entry: is it present? */
312 spgd = spgd_addr(lg, lg->pgdidx, vaddr); 301 spgd = spgd_addr(lg, lg->pgdidx, vaddr);
313 if (!(spgd->flags & _PAGE_PRESENT)) 302 if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
314 return 0; 303 return 0;
315 304
316 /* Check the flags on the pte entry itself: it must be present and 305 /* Check the flags on the pte entry itself: it must be present and
317 * writable. */ 306 * writable. */
318 flags = spte_addr(lg, *spgd, vaddr)->flags; 307 flags = pte_flags(*(spte_addr(lg, *spgd, vaddr)));
308
319 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); 309 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
320} 310}
321 311
@@ -329,22 +319,22 @@ void pin_page(struct lguest *lg, unsigned long vaddr)
329} 319}
330 320
331/*H:450 If we chase down the release_pgd() code, it looks like this: */ 321/*H:450 If we chase down the release_pgd() code, it looks like this: */
332static void release_pgd(struct lguest *lg, spgd_t *spgd) 322static void release_pgd(struct lguest *lg, pgd_t *spgd)
333{ 323{
334 /* If the entry's not present, there's nothing to release. */ 324 /* If the entry's not present, there's nothing to release. */
335 if (spgd->flags & _PAGE_PRESENT) { 325 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
336 unsigned int i; 326 unsigned int i;
337 /* Converting the pfn to find the actual PTE page is easy: turn 327 /* Converting the pfn to find the actual PTE page is easy: turn
338 * the page number into a physical address, then convert to a 328 * the page number into a physical address, then convert to a
339 * virtual address (easy for kernel pages like this one). */ 329 * virtual address (easy for kernel pages like this one). */
340 spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT); 330 pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
341 /* For each entry in the page, we might need to release it. */ 331 /* For each entry in the page, we might need to release it. */
342 for (i = 0; i < PTES_PER_PAGE; i++) 332 for (i = 0; i < PTRS_PER_PTE; i++)
343 release_pte(ptepage[i]); 333 release_pte(ptepage[i]);
344 /* Now we can free the page of PTEs */ 334 /* Now we can free the page of PTEs */
345 free_page((long)ptepage); 335 free_page((long)ptepage);
346 /* And zero out the PGD entry so we never release it twice. */ 336 /* And zero out the PGD entry so we never release it twice. */
347 spgd->raw.val = 0; 337 *spgd = __pgd(0);
348 } 338 }
349} 339}
350 340
@@ -356,7 +346,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
356{ 346{
357 unsigned int i; 347 unsigned int i;
358 /* Release every pgd entry up to the kernel's address. */ 348 /* Release every pgd entry up to the kernel's address. */
359 for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++) 349 for (i = 0; i < pgd_index(lg->kernel_address); i++)
360 release_pgd(lg, lg->pgdirs[idx].pgdir + i); 350 release_pgd(lg, lg->pgdirs[idx].pgdir + i);
361} 351}
362 352
@@ -369,6 +359,25 @@ void guest_pagetable_flush_user(struct lguest *lg)
369} 359}
370/*:*/ 360/*:*/
371 361
362/* We walk down the guest page tables to get a guest-physical address */
363unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
364{
365 pgd_t gpgd;
366 pte_t gpte;
367
368 /* First step: get the top-level Guest page table entry. */
369 gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
370 /* Toplevel not present? We can't map it in. */
371 if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
372 kill_guest(lg, "Bad address %#lx", vaddr);
373
374 gpte = lgread(lg, gpte_addr(lg, gpgd, vaddr), pte_t);
375 if (!(pte_flags(gpte) & _PAGE_PRESENT))
376 kill_guest(lg, "Bad address %#lx", vaddr);
377
378 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
379}
380
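
A worked example of that return expression, with hypothetical values: if the final gpte names pfn 0x1234 and vaddr is 0xC0101567, then:

	pte_pfn(gpte) * PAGE_SIZE = 0x1234000
	vaddr & ~PAGE_MASK        = 0x567
	guest_pa()                = 0x1234000 | 0x567 = 0x1234567
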
372/* We keep several page tables. This is a simple routine to find the page 381/* We keep several page tables. This is a simple routine to find the page
373 * table (if any) corresponding to this top-level address the Guest has given 382 * table (if any) corresponding to this top-level address the Guest has given
374 * us. */ 383 * us. */
@@ -376,7 +385,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
376{ 385{
377 unsigned int i; 386 unsigned int i;
378 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) 387 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
379 if (lg->pgdirs[i].cr3 == pgtable) 388 if (lg->pgdirs[i].gpgdir == pgtable)
380 break; 389 break;
381 return i; 390 return i;
382} 391}
@@ -385,7 +394,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
385 * allocate a new one (and so the kernel parts are not there), we set 394 * allocate a new one (and so the kernel parts are not there), we set
386 * blank_pgdir. */ 395 * blank_pgdir. */
387static unsigned int new_pgdir(struct lguest *lg, 396static unsigned int new_pgdir(struct lguest *lg,
388 unsigned long cr3, 397 unsigned long gpgdir,
389 int *blank_pgdir) 398 int *blank_pgdir)
390{ 399{
391 unsigned int next; 400 unsigned int next;
@@ -395,7 +404,7 @@ static unsigned int new_pgdir(struct lguest *lg,
395 next = random32() % ARRAY_SIZE(lg->pgdirs); 404 next = random32() % ARRAY_SIZE(lg->pgdirs);
396 /* If it's never been allocated at all before, try now. */ 405 /* If it's never been allocated at all before, try now. */
397 if (!lg->pgdirs[next].pgdir) { 406 if (!lg->pgdirs[next].pgdir) {
398 lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL); 407 lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
399 /* If the allocation fails, just keep using the one we have */ 408 /* If the allocation fails, just keep using the one we have */
400 if (!lg->pgdirs[next].pgdir) 409 if (!lg->pgdirs[next].pgdir)
401 next = lg->pgdidx; 410 next = lg->pgdidx;
@@ -405,7 +414,7 @@ static unsigned int new_pgdir(struct lguest *lg,
405 *blank_pgdir = 1; 414 *blank_pgdir = 1;
406 } 415 }
407 /* Record which Guest toplevel this shadows. */ 416 /* Record which Guest toplevel this shadows. */
408 lg->pgdirs[next].cr3 = cr3; 417 lg->pgdirs[next].gpgdir = gpgdir;
409 /* Release all the non-kernel mappings. */ 418 /* Release all the non-kernel mappings. */
410 flush_user_mappings(lg, next); 419 flush_user_mappings(lg, next);
411 420
@@ -472,26 +481,27 @@ void guest_pagetable_clear_all(struct lguest *lg)
472 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately. 481 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
473 */ 482 */
474static void do_set_pte(struct lguest *lg, int idx, 483static void do_set_pte(struct lguest *lg, int idx,
475 unsigned long vaddr, gpte_t gpte) 484 unsigned long vaddr, pte_t gpte)
476{ 485{
477 /* Look up the matching shadow page directory entry. */ 486 /* Look up the matching shadow page directory entry. */
478 spgd_t *spgd = spgd_addr(lg, idx, vaddr); 487 pgd_t *spgd = spgd_addr(lg, idx, vaddr);
479 488
480 /* If the top level isn't present, there's no entry to update. */ 489 /* If the top level isn't present, there's no entry to update. */
481 if (spgd->flags & _PAGE_PRESENT) { 490 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
482 /* Otherwise, we start by releasing the existing entry. */ 491 /* Otherwise, we start by releasing the existing entry. */
483 spte_t *spte = spte_addr(lg, *spgd, vaddr); 492 pte_t *spte = spte_addr(lg, *spgd, vaddr);
484 release_pte(*spte); 493 release_pte(*spte);
485 494
486 /* If they're setting this entry as dirty or accessed, we might 495 /* If they're setting this entry as dirty or accessed, we might
487 * as well put that entry they've given us in now. This shaves 496 * as well put that entry they've given us in now. This shaves
488 * 10% off a copy-on-write micro-benchmark. */ 497 * 10% off a copy-on-write micro-benchmark. */
489 if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) { 498 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
490 check_gpte(lg, gpte); 499 check_gpte(lg, gpte);
491 *spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY); 500 *spte = gpte_to_spte(lg, gpte,
501 pte_flags(gpte) & _PAGE_DIRTY);
492 } else 502 } else
493 /* Otherwise we can demand_page() it in later. */ 503 /* Otherwise we can demand_page() it in later. */
494 spte->raw.val = 0; 504 *spte = __pte(0);
495 } 505 }
496} 506}
497 507
@@ -506,18 +516,18 @@ static void do_set_pte(struct lguest *lg, int idx,
506 * The benefit is that when we have to track a new page table, we can copy keep 516 * The benefit is that when we have to track a new page table, we can copy keep
507 * all the kernel mappings. This speeds up context switch immensely. */ 517 * all the kernel mappings. This speeds up context switch immensely. */
508void guest_set_pte(struct lguest *lg, 518void guest_set_pte(struct lguest *lg,
509 unsigned long cr3, unsigned long vaddr, gpte_t gpte) 519 unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
510{ 520{
511 /* Kernel mappings must be changed on all top levels. Slow, but 521 /* Kernel mappings must be changed on all top levels. Slow, but
512 * doesn't happen often. */ 522 * doesn't happen often. */
513 if (vaddr >= lg->page_offset) { 523 if (vaddr >= lg->kernel_address) {
514 unsigned int i; 524 unsigned int i;
515 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) 525 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
516 if (lg->pgdirs[i].pgdir) 526 if (lg->pgdirs[i].pgdir)
517 do_set_pte(lg, i, vaddr, gpte); 527 do_set_pte(lg, i, vaddr, gpte);
518 } else { 528 } else {
519 /* Is this page table one we have a shadow for? */ 529 /* Is this page table one we have a shadow for? */
520 int pgdir = find_pgdir(lg, cr3); 530 int pgdir = find_pgdir(lg, gpgdir);
521 if (pgdir != ARRAY_SIZE(lg->pgdirs)) 531 if (pgdir != ARRAY_SIZE(lg->pgdirs))
522 /* If so, do the update. */ 532 /* If so, do the update. */
523 do_set_pte(lg, pgdir, vaddr, gpte); 533 do_set_pte(lg, pgdir, vaddr, gpte);
@@ -538,7 +548,7 @@ void guest_set_pte(struct lguest *lg,
538 * 548 *
539 * So with that in mind here's our code to update a (top-level) PGD entry: 549 * So with that in mind here's our code to update a (top-level) PGD entry:
540 */ 550 */
541void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx) 551void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
542{ 552{
543 int pgdir; 553 int pgdir;
544 554
@@ -548,7 +558,7 @@ void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
548 return; 558 return;
549 559
550 /* If they're talking about a page table we have a shadow for... */ 560 /* If they're talking about a page table we have a shadow for... */
551 pgdir = find_pgdir(lg, cr3); 561 pgdir = find_pgdir(lg, gpgdir);
552 if (pgdir < ARRAY_SIZE(lg->pgdirs)) 562 if (pgdir < ARRAY_SIZE(lg->pgdirs))
553 /* ... throw it away. */ 563 /* ... throw it away. */
554 release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); 564 release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
@@ -560,21 +570,34 @@ void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
560 * its first page table is. We set some things up here: */ 570 * its first page table is. We set some things up here: */
561int init_guest_pagetable(struct lguest *lg, unsigned long pgtable) 571int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
562{ 572{
563 /* In flush_user_mappings() we loop from 0 to
564 * "vaddr_to_pgd_index(lg->page_offset)". This assumes it won't hit
565 * the Switcher mappings, so check that now. */
566 if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
567 return -EINVAL;
568 /* We start on the first shadow page table, and give it a blank PGD 573 /* We start on the first shadow page table, and give it a blank PGD
569 * page. */ 574 * page. */
570 lg->pgdidx = 0; 575 lg->pgdidx = 0;
571 lg->pgdirs[lg->pgdidx].cr3 = pgtable; 576 lg->pgdirs[lg->pgdidx].gpgdir = pgtable;
572 lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL); 577 lg->pgdirs[lg->pgdidx].pgdir = (pgd_t*)get_zeroed_page(GFP_KERNEL);
573 if (!lg->pgdirs[lg->pgdidx].pgdir) 578 if (!lg->pgdirs[lg->pgdidx].pgdir)
574 return -ENOMEM; 579 return -ENOMEM;
575 return 0; 580 return 0;
576} 581}
577 582
583/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
584void page_table_guest_data_init(struct lguest *lg)
585{
586 /* We get the kernel address: above this is all kernel memory. */
587 if (get_user(lg->kernel_address, &lg->lguest_data->kernel_address)
588 /* We tell the Guest that it can't use the top 4MB of virtual
589 * addresses used by the Switcher. */
590 || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
591 || put_user(lg->pgdirs[lg->pgdidx].gpgdir,&lg->lguest_data->pgdir))
592 kill_guest(lg, "bad guest page %p", lg->lguest_data);
593
594 /* In flush_user_mappings() we loop from 0 to
595 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
596 * Switcher mappings, so check that now. */
597 if (pgd_index(lg->kernel_address) >= SWITCHER_PGD_INDEX)
598 kill_guest(lg, "bad kernel address %#lx", lg->kernel_address);
599}
600
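
For scale, assuming the usual lguest layout where SWITCHER_ADDR is 0xFFC00000: the 4MB handed to the Guest as reserve_mem is exactly the range covered by the top PGD slot, which is what the pgd_index() check above protects:

	SWITCHER_PGD_INDEX = PTRS_PER_PGD - 1 = 1023
	slot 1023 maps 1023 << 22 = 0xFFC00000 ... 0xFFFFFFFF (4MB)
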
578/* When a Guest dies, our cleanup is fairly simple. */ 601/* When a Guest dies, our cleanup is fairly simple. */
579void free_guest_pagetable(struct lguest *lg) 602void free_guest_pagetable(struct lguest *lg)
580{ 603{
@@ -594,14 +617,14 @@ void free_guest_pagetable(struct lguest *lg)
594 * for each CPU already set up, we just need to hook them in. */ 617 * for each CPU already set up, we just need to hook them in. */
595void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages) 618void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
596{ 619{
597 spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); 620 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
598 spgd_t switcher_pgd; 621 pgd_t switcher_pgd;
599 spte_t regs_pte; 622 pte_t regs_pte;
600 623
601 /* Make the last PGD entry for this Guest point to the Switcher's PTE 624 /* Make the last PGD entry for this Guest point to the Switcher's PTE
602 * page for this CPU (with appropriate flags). */ 625 * page for this CPU (with appropriate flags). */
603 switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT; 626 switcher_pgd = __pgd(__pa(switcher_pte_page) | _PAGE_KERNEL);
604 switcher_pgd.flags = _PAGE_KERNEL; 627
605 lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; 628 lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
606 629
607 /* We also change the Switcher PTE page. When we're running the Guest, 630 /* We also change the Switcher PTE page. When we're running the Guest,
@@ -611,10 +634,8 @@ void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
611 * CPU's "struct lguest_pages": if we make sure the Guest's register 634 * CPU's "struct lguest_pages": if we make sure the Guest's register
612 * page is already mapped there, we don't have to copy them out 635 * page is already mapped there, we don't have to copy them out
613 * again. */ 636 * again. */
614 regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT; 637 regs_pte = pfn_pte (__pa(lg->regs_page) >> PAGE_SHIFT, __pgprot(_PAGE_KERNEL));
615 regs_pte.flags = _PAGE_KERNEL; 638 switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
616 switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
617 = regs_pte;
618} 639}
619/*:*/ 640/*:*/
620 641
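
The index expression above, worked through with a hypothetical address: if this CPU's "struct lguest_pages" sits at 0xFFC02000, then:

	(0xFFC02000 / PAGE_SIZE) % PTRS_PER_PTE = 0xFFC02 % 1024 = 2

i.e. the Guest's register page lands in the third PTE slot of the Switcher's top-4MB PTE page, right where "pages" will be looked up.
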
@@ -635,24 +656,25 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
635 unsigned int pages) 656 unsigned int pages)
636{ 657{
637 unsigned int i; 658 unsigned int i;
638 spte_t *pte = switcher_pte_page(cpu); 659 pte_t *pte = switcher_pte_page(cpu);
639 660
640 /* The first entries are easy: they map the Switcher code. */ 661 /* The first entries are easy: they map the Switcher code. */
641 for (i = 0; i < pages; i++) { 662 for (i = 0; i < pages; i++) {
642 pte[i].pfn = page_to_pfn(switcher_page[i]); 663 pte[i] = mk_pte(switcher_page[i],
643 pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED; 664 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
644 } 665 }
645 666
646 /* The only other thing we map is this CPU's pair of pages. */ 667 /* The only other thing we map is this CPU's pair of pages. */
647 i = pages + cpu*2; 668 i = pages + cpu*2;
648 669
649 /* First page (Guest registers) is writable from the Guest */ 670 /* First page (Guest registers) is writable from the Guest */
650 pte[i].pfn = page_to_pfn(switcher_page[i]); 671 pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
651 pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW; 672 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
673
652 /* The second page contains the "struct lguest_ro_state", and is 674 /* The second page contains the "struct lguest_ro_state", and is
653 * read-only. */ 675 * read-only. */
654 pte[i+1].pfn = page_to_pfn(switcher_page[i+1]); 676 pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
655 pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED; 677 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
656} 678}
657 679
658/*H:510 At boot or module load time, init_pagetables() allocates and populates 680/*H:510 At boot or module load time, init_pagetables() allocates and populates
@@ -662,7 +684,7 @@ __init int init_pagetables(struct page **switcher_page, unsigned int pages)
662 unsigned int i; 684 unsigned int i;
663 685
664 for_each_possible_cpu(i) { 686 for_each_possible_cpu(i) {
665 switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL); 687 switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
666 if (!switcher_pte_page(i)) { 688 if (!switcher_pte_page(i)) {
667 free_switcher_pte_pages(); 689 free_switcher_pte_pages();
668 return -ENOMEM; 690 return -ENOMEM;
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 9b81119f46e9..c2434ec99f7b 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -73,14 +73,14 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
73 /* Segment descriptors contain a privilege level: the Guest is 73 /* Segment descriptors contain a privilege level: the Guest is
74 * sometimes careless and leaves this as 0, even though it's 74 * sometimes careless and leaves this as 0, even though it's
75 * running at privilege level 1. If so, we fix it here. */ 75 * running at privilege level 1. If so, we fix it here. */
76 if ((lg->gdt[i].b & 0x00006000) == 0) 76 if ((lg->arch.gdt[i].b & 0x00006000) == 0)
77 lg->gdt[i].b |= (GUEST_PL << 13); 77 lg->arch.gdt[i].b |= (GUEST_PL << 13);
78 78
79 /* Each descriptor has an "accessed" bit. If we don't set it 79 /* Each descriptor has an "accessed" bit. If we don't set it
80 * now, the CPU will try to set it when the Guest first loads 80 * now, the CPU will try to set it when the Guest first loads
81 * that entry into a segment register. But the GDT isn't 81 * that entry into a segment register. But the GDT isn't
82 * writable by the Guest, so bad things can happen. */ 82 * writable by the Guest, so bad things can happen. */
83 lg->gdt[i].b |= 0x00000100; 83 lg->arch.gdt[i].b |= 0x00000100;
84 } 84 }
85} 85}
86 86
@@ -106,12 +106,12 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
106void setup_guest_gdt(struct lguest *lg) 106void setup_guest_gdt(struct lguest *lg)
107{ 107{
108 /* Start with full 0-4G segments... */ 108 /* Start with full 0-4G segments... */
109 lg->gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; 109 lg->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
110 lg->gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; 110 lg->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
111 /* ...except the Guest is allowed to use them, so set the privilege 111 /* ...except the Guest is allowed to use them, so set the privilege
112 * level appropriately in the flags. */ 112 * level appropriately in the flags. */
113 lg->gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); 113 lg->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
114 lg->gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); 114 lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
115} 115}
116 116
117/* Like the IDT, we never simply use the GDT the Guest gives us. We set up the 117/* Like the IDT, we never simply use the GDT the Guest gives us. We set up the
@@ -126,7 +126,7 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
126 unsigned int i; 126 unsigned int i;
127 127
128 for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) 128 for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
129 gdt[i] = lg->gdt[i]; 129 gdt[i] = lg->arch.gdt[i];
130} 130}
131 131
132/* This is the full version */ 132/* This is the full version */
@@ -138,7 +138,7 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
138 * replaced. See ignored_gdt() above. */ 138 * replaced. See ignored_gdt() above. */
139 for (i = 0; i < GDT_ENTRIES; i++) 139 for (i = 0; i < GDT_ENTRIES; i++)
140 if (!ignored_gdt(i)) 140 if (!ignored_gdt(i))
141 gdt[i] = lg->gdt[i]; 141 gdt[i] = lg->arch.gdt[i];
142} 142}
143 143
144/* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */ 144/* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */
@@ -146,12 +146,12 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
146{ 146{
147 /* We assume the Guest has the same number of GDT entries as the 147 /* We assume the Guest has the same number of GDT entries as the
148 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ 148 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
149 if (num > ARRAY_SIZE(lg->gdt)) 149 if (num > ARRAY_SIZE(lg->arch.gdt))
150 kill_guest(lg, "too many gdt entries %i", num); 150 kill_guest(lg, "too many gdt entries %i", num);
151 151
152 /* We read the whole thing in, then fix it up. */ 152 /* We read the whole thing in, then fix it up. */
153 lgread(lg, lg->gdt, table, num * sizeof(lg->gdt[0])); 153 __lgread(lg, lg->arch.gdt, table, num * sizeof(lg->arch.gdt[0]));
154 fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->gdt)); 154 fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->arch.gdt));
155 /* Mark that the GDT changed so the core knows it has to copy it again, 155 /* Mark that the GDT changed so the core knows it has to copy it again,
156 * even if the Guest is run on the same CPU. */ 156 * even if the Guest is run on the same CPU. */
157 lg->changed |= CHANGED_GDT; 157 lg->changed |= CHANGED_GDT;
@@ -159,9 +159,9 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
159 159
160void guest_load_tls(struct lguest *lg, unsigned long gtls) 160void guest_load_tls(struct lguest *lg, unsigned long gtls)
161{ 161{
162 struct desc_struct *tls = &lg->gdt[GDT_ENTRY_TLS_MIN]; 162 struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN];
163 163
164 lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); 164 __lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
165 fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); 165 fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
166 lg->changed |= CHANGED_GDT_TLS; 166 lg->changed |= CHANGED_GDT_TLS;
167} 167}
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
new file mode 100644
index 000000000000..9eed12d5a395
--- /dev/null
+++ b/drivers/lguest/x86/core.c
@@ -0,0 +1,577 @@
1/*
2 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
3 * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/kernel.h>
21#include <linux/start_kernel.h>
22#include <linux/string.h>
23#include <linux/console.h>
24#include <linux/screen_info.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
27#include <linux/clocksource.h>
28#include <linux/clockchips.h>
29#include <linux/cpu.h>
30#include <linux/lguest.h>
31#include <linux/lguest_launcher.h>
32#include <asm/paravirt.h>
33#include <asm/param.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/desc.h>
37#include <asm/setup.h>
38#include <asm/lguest.h>
39#include <asm/uaccess.h>
40#include <asm/i387.h>
41#include "../lg.h"
42
43static int cpu_had_pge;
44
45static struct {
46 unsigned long offset;
47 unsigned short segment;
48} lguest_entry;
49
50/* Offset from where switcher.S was compiled to where we've copied it */
51static unsigned long switcher_offset(void)
52{
53 return SWITCHER_ADDR - (unsigned long)start_switcher_text;
54}
55
56/* This cpu's struct lguest_pages. */
57static struct lguest_pages *lguest_pages(unsigned int cpu)
58{
59 return &(((struct lguest_pages *)
60 (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
61}
62
63static DEFINE_PER_CPU(struct lguest *, last_guest);
64
65/*S:010
66 * We are getting close to the Switcher.
67 *
68 * Remember that each CPU has two pages which are visible to the Guest when it
69 * runs on that CPU. This has to contain the state for that Guest: we copy the
70 * state in just before we run the Guest.
71 *
72 * Each Guest has "changed" flags which indicate what has changed in the Guest
73 * since it last ran. We saw this set in interrupts_and_traps.c and
74 * segments.c.
75 */
76static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
77{
78 /* Copying all this data can be quite expensive. We usually run the
79 * same Guest we ran last time (and that Guest hasn't run anywhere else
80 * meanwhile). If that's not the case, we pretend everything in the
81 * Guest has changed. */
82 if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
83 __get_cpu_var(last_guest) = lg;
84 lg->last_pages = pages;
85 lg->changed = CHANGED_ALL;
86 }
87
88 /* These copies are pretty cheap, so we do them unconditionally: */
89 /* Save the current Host top-level page directory. */
90 pages->state.host_cr3 = __pa(current->mm->pgd);
91 /* Set up the Guest's page tables to see this CPU's pages (and no
92 * other CPU's pages). */
93 map_switcher_in_guest(lg, pages);
94 /* Set up the two "TSS" members which tell the CPU what stack to use
95 * for traps which go directly into the Guest (ie. traps at privilege
96 * level 1). */
97 pages->state.guest_tss.esp1 = lg->esp1;
98 pages->state.guest_tss.ss1 = lg->ss1;
99
100 /* Copy direct-to-Guest trap entries. */
101 if (lg->changed & CHANGED_IDT)
102 copy_traps(lg, pages->state.guest_idt, default_idt_entries);
103
104 /* Copy all GDT entries which the Guest can change. */
105 if (lg->changed & CHANGED_GDT)
106 copy_gdt(lg, pages->state.guest_gdt);
107 /* If only the TLS entries have changed, copy them. */
108 else if (lg->changed & CHANGED_GDT_TLS)
109 copy_gdt_tls(lg, pages->state.guest_gdt);
110
111 /* Mark the Guest as unchanged for next time. */
112 lg->changed = 0;
113}
114
115/* Finally: the code to actually call into the Switcher to run the Guest. */
116static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
117{
118 /* This is a dummy value we need for GCC's sake. */
119 unsigned int clobber;
120
121 /* Copy the guest-specific information into this CPU's "struct
122 * lguest_pages". */
123 copy_in_guest_info(lg, pages);
124
125 /* Set the trap number to 256 (impossible value). If we fault while
126 * switching to the Guest (bad segment registers or bug), this will
127 * cause us to abort the Guest. */
128 lg->regs->trapnum = 256;
129
130 /* Now: we push the "eflags" register on the stack, then do an "lcall".
131 * This is how we change from using the kernel code segment to using
132 * the dedicated lguest code segment, as well as jumping into the
133 * Switcher.
134 *
135 * The lcall also pushes the old code segment (KERNEL_CS) onto the
136 * stack, then the address of this call. This stack layout happens to
137 * exactly match the stack of an interrupt... */
138 asm volatile("pushf; lcall *lguest_entry"
139 /* This is how we tell GCC that %eax ("a") and %ebx ("b")
140 * are changed by this routine. The "=" means output. */
141 : "=a"(clobber), "=b"(clobber)
142 /* %eax contains the pages pointer. ("0" refers to the
143 * 0-th argument above, ie "a"). %ebx contains the
144 * physical address of the Guest's top-level page
145 * directory. */
146 : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
147 /* We tell gcc that all these registers could change,
148 * which means we don't have to save and restore them in
149 * the Switcher. */
150 : "memory", "%edx", "%ecx", "%edi", "%esi");
151}
152/*:*/
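
To spell out the far call, here is roughly what the inline asm above amounts to (a sketch; lguest_entry is the { offset, segment } pair declared at the top of this file):

	pushf                   # save eflags, as a real interrupt would
	lcall	*lguest_entry   # push %cs and the return %eip, then load
	                        # LGUEST_CS:switch_to_guest (high-mapped copy)

which leaves eflags, %cs and %eip stacked in exactly the layout an "iret" expects, so returning from the Switcher looks like returning from an interrupt.
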
153
154/*H:040 This is the i386-specific code to setup and run the Guest. Interrupts
155 * are disabled: we own the CPU. */
156void lguest_arch_run_guest(struct lguest *lg)
157{
158 /* Remember the awfully-named TS bit? If the Guest has asked
159 * to set it we set it now, so we can trap and pass that trap
160 * to the Guest if it uses the FPU. */
161 if (lg->ts)
162 lguest_set_ts();
163
164 /* SYSENTER is an optimized way of doing system calls. We
165 * can't allow it because it always jumps to privilege level 0.
166 * A normal Guest won't try it because we don't advertise it in
167 * CPUID, but a malicious Guest (or malicious Guest userspace
168 * program) could, so we tell the CPU to disable it before
169 * running the Guest. */
170 if (boot_cpu_has(X86_FEATURE_SEP))
171 wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
172
173 /* Now we actually run the Guest. It will pop back out when
174 * something interesting happens, and we can examine its
175 * registers to see what it was doing. */
176 run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
177
178 /* The "regs" pointer contains two extra entries which are not
179 * really registers: a trap number which says what interrupt or
180 * trap made the switcher code come back, and an error code
181 * which some traps set. */
182
183 /* If the Guest page faulted, then the cr2 register will tell
184 * us the bad virtual address. We have to grab this now,
185 * because once we re-enable interrupts an interrupt could
186 * fault and thus overwrite cr2, or we could even move off to a
187 * different CPU. */
188 if (lg->regs->trapnum == 14)
189 lg->arch.last_pagefault = read_cr2();
190 /* Similarly, if we took a trap because the Guest used the FPU,
191 * we have to restore the FPU it expects to see. */
192 else if (lg->regs->trapnum == 7)
193 math_state_restore();
194
195 /* Restore SYSENTER if it's supposed to be on. */
196 if (boot_cpu_has(X86_FEATURE_SEP))
197 wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
198}
199
200/*H:130 Our Guest is usually so well behaved; it never tries to do things it
201 * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't
202 * quite complete, because it doesn't contain replacements for the Intel I/O
203 * instructions. As a result, the Guest sometimes fumbles across one during
204 * the boot process as it probes for various things which are usually attached
205 * to a PC.
206 *
207 * When the Guest uses one of these instructions, we get trap #13 (General
208 * Protection Fault) and come here. We see if it's one of those troublesome
209 * instructions and skip over it. We return true if we did. */
210static int emulate_insn(struct lguest *lg)
211{
212 u8 insn;
213 unsigned int insnlen = 0, in = 0, shift = 0;
214 /* The eip contains the *virtual* address of the Guest's instruction:
215 * guest_pa() walks the Guest page tables to find the physical address. */
216 unsigned long physaddr = guest_pa(lg, lg->regs->eip);
217
218 /* This must be the Guest kernel trying to do something, not userspace!
219 * The bottom two bits of the CS segment register are the privilege
220 * level. */
221 if ((lg->regs->cs & 3) != GUEST_PL)
222 return 0;
223
224 /* Decoding x86 instructions is icky. */
225 insn = lgread(lg, physaddr, u8);
226
227 /* 0x66 is an "operand prefix". It means it's using the upper 16 bits
228 of the eax register. */
229 if (insn == 0x66) {
230 shift = 16;
231 /* The instruction is 1 byte so far, read the next byte. */
232 insnlen = 1;
233 insn = lgread(lg, physaddr + insnlen, u8);
234 }
235
236 /* We can ignore the lower bit for the moment and decode the 4 opcodes
237 * we need to emulate. */
238 switch (insn & 0xFE) {
239 case 0xE4: /* in <next byte>,%al */
240 insnlen += 2;
241 in = 1;
242 break;
243 case 0xEC: /* in (%dx),%al */
244 insnlen += 1;
245 in = 1;
246 break;
247 case 0xE6: /* out %al,<next byte> */
248 insnlen += 2;
249 break;
250 case 0xEE: /* out %al,(%dx) */
251 insnlen += 1;
252 break;
253 default:
254 /* OK, we don't know what this is, can't emulate. */
255 return 0;
256 }
257
258 /* If it was an "IN" instruction, they expect the result to be read
259 * into %eax, so we change %eax. We always return all-ones, which
260 * traditionally means "there's nothing there". */
261 if (in) {
262 /* Lower bit tells us whether it's a 16 or 32 bit access */
263 if (insn & 0x1)
264 lg->regs->eax = 0xFFFFFFFF;
265 else
266 lg->regs->eax |= (0xFFFF << shift);
267 }
268 /* Finally, we've "done" the instruction, so move past it. */
269 lg->regs->eip += insnlen;
270 /* Success! */
271 return 1;
272}
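
A standalone sanity sketch (hypothetical, plain userspace C) of the "& 0xFE" trick: masking off the low bit folds each byte/word opcode pair onto a single case label:

	#include <stdio.h>

	int main(void)
	{
		/* in imm8 / in (%dx), then out imm8 / out (%dx): byte and word forms. */
		unsigned char ops[] = { 0xE4, 0xE5, 0xEC, 0xED,
					0xE6, 0xE7, 0xEE, 0xEF };
		unsigned int i;

		for (i = 0; i < sizeof(ops); i++)
			printf("%#04x decodes via case %#04x\n",
			       ops[i], ops[i] & 0xFE);
		return 0;
	}
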
273
274/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
275void lguest_arch_handle_trap(struct lguest *lg)
276{
277 switch (lg->regs->trapnum) {
278 case 13: /* We've intercepted a GPF. */
279 /* Check if this was one of those annoying IN or OUT
280 * instructions which we need to emulate. If so, we
281 * just go back into the Guest after we've done it. */
282 if (lg->regs->errcode == 0) {
283 if (emulate_insn(lg))
284 return;
285 }
286 break;
287 case 14: /* We've intercepted a page fault. */
288 /* The Guest accessed a virtual address that wasn't
289 * mapped. This happens a lot: we don't actually set
290 * up most of the page tables for the Guest at all when
291 * we start: as it runs it asks for more and more, and
292 * we set them up as required. In this case, we don't
293 * even tell the Guest that the fault happened.
294 *
295 * The errcode tells whether this was a read or a
296 * write, and whether kernel or userspace code. */
297 if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode))
298 return;
299
300 /* OK, it's really not there (or not OK): the Guest
301 * needs to know. We write out the cr2 value so it
302 * knows where the fault occurred.
303 *
304 * Note that if the Guest were really messed up, this
305 * could happen before it's done the INITIALIZE
306 * hypercall, so lg->lguest_data will be NULL */
307 if (lg->lguest_data &&
308 put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2))
309 kill_guest(lg, "Writing cr2");
310 break;
311 case 7: /* We've intercepted a Device Not Available fault. */
312 /* If the Guest doesn't want to know, we already
313 * restored the Floating Point Unit, so we just
314 * continue without telling it. */
315 if (!lg->ts)
316 return;
317 break;
318 case 32 ... 255:
319 /* These values mean a real interrupt occurred, in which case
320 * the Host handler has already been run. We just do a
321 * friendly check if another process should now be run, then
322 * return to run the Guest again */
323 cond_resched();
324 return;
325 case LGUEST_TRAP_ENTRY:
326 /* Our 'struct hcall_args' maps directly over our regs: we set
327 * up the pointer now to indicate a hypercall is pending. */
328 lg->hcall = (struct hcall_args *)lg->regs;
329 return;
330 }
331
332 /* We didn't handle the trap, so it needs to go to the Guest. */
333 if (!deliver_trap(lg, lg->regs->trapnum))
334 /* If the Guest doesn't have a handler (either it hasn't
335 * registered any yet, or it's one of the faults we don't let
336 * it handle), it dies with a cryptic error message. */
337 kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
338 lg->regs->trapnum, lg->regs->eip,
339 lg->regs->trapnum == 14 ? lg->arch.last_pagefault
340 : lg->regs->errcode);
341}
342
343/* Now we can look at each of the routines this calls, in increasing order of
344 * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
345 * deliver_trap() and demand_page(). After all those, we'll be ready to
346 * examine the Switcher, and our philosophical understanding of the Host/Guest
347 * duality will be complete. :*/
348static void adjust_pge(void *on)
349{
350 if (on)
351 write_cr4(read_cr4() | X86_CR4_PGE);
352 else
353 write_cr4(read_cr4() & ~X86_CR4_PGE);
354}
355
356/*H:020 Now the Switcher is mapped and everything else is ready, we need to do
357 * some more i386-specific initialization. */
358void __init lguest_arch_host_init(void)
359{
360 int i;
361
362 /* Most of the i386/switcher.S doesn't care that it's been moved; on
363 * Intel, jumps are relative, and it doesn't access any references to
364 * external code or data.
365 *
366 * The only exception is the interrupt handlers in switcher.S: their
367 * addresses are placed in a table (default_idt_entries), so we need to
368 * update the table with the new addresses. switcher_offset() is a
369 * convenience function which returns the distance between the builtin
370 * switcher code and the high-mapped copy we just made. */
371 for (i = 0; i < IDT_ENTRIES; i++)
372 default_idt_entries[i] += switcher_offset();
373
374 /*
375 * Set up the Switcher's per-cpu areas.
376 *
377 * Each CPU gets two pages of its own within the high-mapped region
378 * (aka. "struct lguest_pages"). Much of this can be initialized now,
379 * but some depends on what Guest we are running (which is set up in
380 * copy_in_guest_info()).
381 */
382 for_each_possible_cpu(i) {
383 /* lguest_pages() returns this CPU's two pages. */
384 struct lguest_pages *pages = lguest_pages(i);
385 /* This is a convenience pointer to make the code fit one
386 * statement to a line. */
387 struct lguest_ro_state *state = &pages->state;
388
389 /* The Global Descriptor Table: the Host has a different one
390 * for each CPU. We keep a descriptor for the GDT which says
391 * where it is and how big it is (the size field actually holds the
392 * offset of the last byte, hence the "-1"). */
393 state->host_gdt_desc.size = GDT_SIZE-1;
394 state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
395
396 /* All CPUs on the Host use the same Interrupt Descriptor
397 * Table, so we just use store_idt(), which gets this CPU's IDT
398 * descriptor. */
399 store_idt(&state->host_idt_desc);
400
401 /* The descriptors for the Guest's GDT and IDT can be filled
402 * out now, too. We copy the GDT & IDT into ->guest_gdt and
403 * ->guest_idt before actually running the Guest. */
404 state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
405 state->guest_idt_desc.address = (long)&state->guest_idt;
406 state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
407 state->guest_gdt_desc.address = (long)&state->guest_gdt;
408
409 /* We know where we want the stack to be when the Guest enters
410 * the switcher: in pages->regs. The stack grows downwards, so
411 * we start it at the end of that structure. */
412 state->guest_tss.esp0 = (long)(&pages->regs + 1);
413 /* And this is the GDT entry to use for the stack: we keep a
414 * couple of special LGUEST entries. */
415 state->guest_tss.ss0 = LGUEST_DS;
416
417 /* x86 can have a fine-grained bitmap which indicates what I/O
418 * ports the process can use. We set it to the end of our
419 * structure, meaning "none". */
420 state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
421
422 /* Some GDT entries are the same across all Guests, so we can
423 * set them up now. */
424 setup_default_gdt_entries(state);
425 /* Most IDT entries are the same for all Guests, too.*/
426 setup_default_idt_entries(state, default_idt_entries);
427
428 /* The Host needs to be able to use the LGUEST segments on this
429 * CPU, too, so put them in the Host GDT. */
430 get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
431 get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
432 }
433
434 /* In the Switcher, we want the %cs segment register to use the
435 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
436 * it will be undisturbed when we switch. To change %cs and jump we
437 * need this structure to feed to Intel's "lcall" instruction. */
438 lguest_entry.offset = (long)switch_to_guest + switcher_offset();
439 lguest_entry.segment = LGUEST_CS;
440
441 /* Finally, we need to turn off "Page Global Enable". PGE is an
442 * optimization where page table entries are specially marked to show
443 * they never change. The Host kernel marks all the kernel pages this
444 * way because it's always present, even when userspace is running.
445 *
446 * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
447 * switch to the Guest kernel. If you don't disable this on all CPUs,
448 * you'll get really weird bugs that you'll chase for two days.
449 *
450 * I used to turn PGE off every time we switched to the Guest and back
451	 * on when we return, but that slowed the Switcher down noticeably. */
452
453 /* We don't need the complexity of CPUs coming and going while we're
454 * doing this. */
455 lock_cpu_hotplug();
456 if (cpu_has_pge) { /* We have a broader idea of "global". */
457 /* Remember that this was originally set (for cleanup). */
458 cpu_had_pge = 1;
459 /* adjust_pge is a helper function which sets or unsets the PGE
460 * bit on its CPU, depending on the argument (0 == unset). */
461 on_each_cpu(adjust_pge, (void *)0, 0, 1);
462 /* Turn off the feature in the global feature set. */
463 clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
464 }
465 unlock_cpu_hotplug();
466}
467/*:*/
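
/* For reference: run_guest_once() (outside this excerpt) is what feeds
 * this structure to the CPU.  A minimal sketch of the far call itself,
 * assuming the {offset, segment} pair above is all "lcall" needs; the
 * real switch also pushes flags and passes arguments in registers. */
static void example_enter_switcher(void)
{
	/* Loads %cs:%eip from lguest_entry, landing in switch_to_guest. */
	asm volatile("lcall *lguest_entry" : : : "memory");
}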
468
469void __exit lguest_arch_host_fini(void)
470{
471 /* If we had PGE before we started, turn it back on now. */
472 lock_cpu_hotplug();
473 if (cpu_had_pge) {
474 set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
475 /* adjust_pge's argument "1" means set PGE. */
476 on_each_cpu(adjust_pge, (void *)1, 0, 1);
477 }
478 unlock_cpu_hotplug();
479}
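
/* adjust_pge() itself is defined earlier in the file, above this
 * excerpt; a minimal sketch of such a helper, assuming the x86 cr4
 * accessors (a zero argument clears PGE, anything else sets it): */
static void adjust_pge(void *on)
{
	if (on)
		write_cr4(read_cr4() | X86_CR4_PGE);
	else
		write_cr4(read_cr4() & ~X86_CR4_PGE);
}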
480
481
482/*H:122 The i386-specific hypercalls simply farm out to the right functions. */
483int lguest_arch_do_hcall(struct lguest *lg, struct hcall_args *args)
484{
485 switch (args->arg0) {
486 case LHCALL_LOAD_GDT:
487 load_guest_gdt(lg, args->arg1, args->arg2);
488 break;
489 case LHCALL_LOAD_IDT_ENTRY:
490 load_guest_idt_entry(lg, args->arg1, args->arg2, args->arg3);
491 break;
492 case LHCALL_LOAD_TLS:
493 guest_load_tls(lg, args->arg1);
494 break;
495 default:
496 /* Bad Guest. Bad! */
497 return -EIO;
498 }
499 return 0;
500}
501
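/* How does the Guest get here?  Via a software trap.  A sketch of the
 * guest-side wrapper, assuming LGUEST_TRAP_ENTRY and an
 * %eax/%edx/%ebx/%ecx convention matching the hcall_args fields used
 * above (__stringify() comes from linux/stringify.h): */
static unsigned long hcall(unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3)
{
	/* "int" raises the trap; the Host leaves its answer in %eax. */
	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
		     : "=a"(arg0)
		     : "a"(arg0), "d"(arg1), "b"(arg2), "c"(arg3)
		     : "memory");
	return arg0;
}
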
502/*H:126 i386-specific hypercall initialization: */
503int lguest_arch_init_hypercalls(struct lguest *lg)
504{
505 u32 tsc_speed;
506
507 /* The pointer to the Guest's "struct lguest_data" is the only
508 * argument. We check that address now. */
509 if (!lguest_address_ok(lg, lg->hcall->arg1, sizeof(*lg->lguest_data)))
510 return -EFAULT;
511
512 /* Having checked it, we simply set lg->lguest_data to point straight
513 * into the Launcher's memory at the right place and then use
514 * copy_to_user/from_user from now on, instead of lgread/write. I put
515 * this in to show that I'm not immune to writing stupid
516 * optimizations. */
517 lg->lguest_data = lg->mem_base + lg->hcall->arg1;
518
519	/* We insist that the Time Stamp Counter exists and doesn't change with
520 * cpu frequency. Some devious chip manufacturers decided that TSC
521 * changes could be handled in software. I decided that time going
522 * backwards might be good for benchmarks, but it's bad for users.
523 *
524 * We also insist that the TSC be stable: the kernel detects unreliable
525 * TSCs for its own purposes, and we use that here. */
526 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
527 tsc_speed = tsc_khz;
528 else
529 tsc_speed = 0;
530 if (put_user(tsc_speed, &lg->lguest_data->tsc_khz))
531 return -EFAULT;
532
533 /* The interrupt code might not like the system call vector. */
534 if (!check_syscall_vector(lg))
535 kill_guest(lg, "bad syscall vector");
536
537 return 0;
538}
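
/* The Guest's half of this handshake is a single early-boot hypercall;
 * a sketch using the hcall() wrapper sketched earlier, assuming
 * LHCALL_LGUEST_INIT and a static "struct lguest_data lguest_data" on
 * the Guest side: */
static void __init example_tell_host(void)
{
	/* arg1 is the physical address lguest_address_ok() checks above. */
	hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
}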
539/* Now we've examined the hypercall code; our Guest can make requests. There
540 * is one other way we can do things for the Guest, as we see in
541 * emulate_insn(). :*/
542
543/*L:030 lguest_arch_setup_regs()
544 *
545 * Most of the Guest's registers are left alone: we used get_zeroed_page() to
546 * allocate the structure, so they will be 0. */
547void lguest_arch_setup_regs(struct lguest *lg, unsigned long start)
548{
549 struct lguest_regs *regs = lg->regs;
550
551 /* There are four "segment" registers which the Guest needs to boot:
552 * The "code segment" register (cs) refers to the kernel code segment
553 * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
554 * refer to the kernel data segment __KERNEL_DS.
555 *
556 * The privilege level is packed into the lower bits. The Guest runs
557 * at privilege level 1 (GUEST_PL).*/
558 regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
559 regs->cs = __KERNEL_CS|GUEST_PL;
560
561 /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
562 * is supposed to always be "1". Bit 9 (0x200) controls whether
563 * interrupts are enabled. We always leave interrupts enabled while
564 * running the Guest. */
565 regs->eflags = 0x202;
566
567 /* The "Extended Instruction Pointer" register says where the Guest is
568 * running. */
569 regs->eip = start;
570
571 /* %esi points to our boot information, at physical address 0, so don't
572 * touch it. */
573 /* There are a couple of GDT entries the Guest expects when first
574 * booting. */
575
576 setup_guest_gdt(lg);
577}
diff --git a/drivers/lguest/switcher.S b/drivers/lguest/x86/switcher_32.S
index 7c9c230cc845..1010b90b11fc 100644
--- a/drivers/lguest/switcher.S
+++ b/drivers/lguest/x86/switcher_32.S
@@ -48,7 +48,8 @@
48#include <linux/linkage.h> 48#include <linux/linkage.h>
49#include <asm/asm-offsets.h> 49#include <asm/asm-offsets.h>
50#include <asm/page.h> 50#include <asm/page.h>
51#include "lg.h" 51#include <asm/segment.h>
52#include <asm/lguest.h>
52 53
53// We mark the start of the code to copy 54// We mark the start of the code to copy
54// It's placed in .text tho it's never run here 55// It's placed in .text tho it's never run here
@@ -132,6 +133,7 @@ ENTRY(switch_to_guest)
132 // The Guest's register page has been mapped 133 // The Guest's register page has been mapped
133 // Writable onto our %esp (stack) -- 134 // Writable onto our %esp (stack) --
134 // We can simply pop off all Guest regs. 135 // We can simply pop off all Guest regs.
136 popl %eax
135 popl %ebx 137 popl %ebx
136 popl %ecx 138 popl %ecx
137 popl %edx 139 popl %edx
@@ -139,7 +141,6 @@ ENTRY(switch_to_guest)
139 popl %edi 141 popl %edi
140 popl %ebp 142 popl %ebp
141 popl %gs 143 popl %gs
142 popl %eax
143 popl %fs 144 popl %fs
144 popl %ds 145 popl %ds
145 popl %es 146 popl %es
@@ -167,7 +168,6 @@ ENTRY(switch_to_guest)
167 pushl %es; \ 168 pushl %es; \
168 pushl %ds; \ 169 pushl %ds; \
169 pushl %fs; \ 170 pushl %fs; \
170 pushl %eax; \
171 pushl %gs; \ 171 pushl %gs; \
172 pushl %ebp; \ 172 pushl %ebp; \
173 pushl %edi; \ 173 pushl %edi; \
@@ -175,6 +175,7 @@ ENTRY(switch_to_guest)
175 pushl %edx; \ 175 pushl %edx; \
176 pushl %ecx; \ 176 pushl %ecx; \
177 pushl %ebx; \ 177 pushl %ebx; \
178 pushl %eax; \
178 /* Our stack and our code are using segments \ 179 /* Our stack and our code are using segments \
179 * Set in the TSS and IDT \ 180 * Set in the TSS and IDT \
180 * Yet if we were to touch data we'd use \ 181 * Yet if we were to touch data we'd use \
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ce34b539bf38..2538816817aa 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3100,4 +3100,10 @@ config NETPOLL_TRAP
3100config NET_POLL_CONTROLLER 3100config NET_POLL_CONTROLLER
3101 def_bool NETPOLL 3101 def_bool NETPOLL
3102 3102
3103config VIRTIO_NET
3104 tristate "Virtio network driver (EXPERIMENTAL)"
3105 depends on EXPERIMENTAL && VIRTIO
3106 ---help---
3107 This is the virtual network driver for lguest. Say Y or M.
3108
3103endif # NETDEVICES 3109endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 22f78cbd126b..593262065c9b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -183,7 +183,6 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
183obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 183obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
184obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 184obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
185obj-$(CONFIG_EQUALIZER) += eql.o 185obj-$(CONFIG_EQUALIZER) += eql.o
186obj-$(CONFIG_LGUEST_NET) += lguest_net.o
187obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o 186obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
188obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o 187obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
189obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o 188obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
@@ -243,3 +242,4 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
243 242
244obj-$(CONFIG_NETXEN_NIC) += netxen/ 243obj-$(CONFIG_NETXEN_NIC) += netxen/
245obj-$(CONFIG_NIU) += niu.o 244obj-$(CONFIG_NIU) += niu.o
245obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c
deleted file mode 100644
index abce2ee8430a..000000000000
--- a/drivers/net/lguest_net.c
+++ /dev/null
@@ -1,555 +0,0 @@
1/*D:500
2 * The Guest network driver.
3 *
4 * This is a very simple virtual network driver, and our last Guest driver.
5 * The only trick is that it can talk directly to multiple other recipients
6 * (ie. other Guests on the same network). It can also be used with only the
7 * Host on the network.
8 :*/
9
10/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26//#define DEBUG
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/module.h>
30#include <linux/mm_types.h>
31#include <linux/io.h>
32#include <linux/lguest_bus.h>
33
34#define SHARED_SIZE PAGE_SIZE
35#define MAX_LANS 4
36#define NUM_SKBS 8
37
38/*M:011 Network code master Jeff Garzik points out numerous shortcomings in
39 * this driver if it aspires to greatness.
40 *
41 * Firstly, it doesn't use "NAPI": the networking's New API, and is poorer for
42 * it. As he says "NAPI means system-wide load leveling, across multiple
43 * network interfaces. Lack of NAPI can mean competition at higher loads."
44 *
45 * He also points out that we don't implement set_mac_address, so users cannot
46 * change the device's hardware address. When I asked why one would want to:
47 * "Bonding, and situations where you /do/ want the MAC address to "leak" out
48 * of the host onto the wider net."
49 *
50 * Finally, he would like module unloading: "It is not unrealistic to think of
51 * [un|re|]loading the net support module in an lguest guest. And, adding
52 * module support makes the programmer more responsible, because they now have
53 * to learn to clean up after themselves. Any driver that cannot clean up
54 * after itself is an incomplete driver in my book."
55 :*/
56
57/*D:530 The "struct lguestnet_info" contains all the information we need to
58 * know about the network device. */
59struct lguestnet_info
60{
61 /* The mapped device page(s) (an array of "struct lguest_net"). */
62 struct lguest_net *peer;
63 /* The physical address of the device page(s) */
64 unsigned long peer_phys;
65 /* The size of the device page(s). */
66 unsigned long mapsize;
67
68 /* The lguest_device I come from */
69 struct lguest_device *lgdev;
70
71 /* My peerid (ie. my slot in the array). */
72 unsigned int me;
73
74 /* Receive queue: the network packets waiting to be filled. */
75 struct sk_buff *skb[NUM_SKBS];
76 struct lguest_dma dma[NUM_SKBS];
77};
78/*:*/
79
80/* How many bytes left in this page. */
81static unsigned int rest_of_page(void *data)
82{
83 return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
84}
85
86/*D:570 Each peer (ie. Guest or Host) on the network binds their receive
87 * buffers to a different key: we simply use the physical address of the
88 * device's memory page plus the peer number. The Host insists that all keys
89 * be a multiple of 4, so we multiply the peer number by 4. */
90static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
91{
92 return info->peer_phys + 4 * peernum;
93}
94
95/* This is the routine which sets up a "struct lguest_dma" to point to a
96 * network packet, similar to req_to_dma() in lguest_blk.c. The structure of a
97 * "struct sk_buff" has grown complex over the years: it consists of a "head"
98 * linear section pointed to by "skb->data", and possibly an array of
99 * "fragments" in the case of a non-linear packet.
100 *
101 * Our receive buffers don't use fragments at all but outgoing skbs might, so
102 * we handle it. */
103static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
104 struct lguest_dma *dma)
105{
106 unsigned int i, seg;
107
108 /* First, we put the linear region into the "struct lguest_dma". Each
109 * entry can't go over a page boundary, so even though all our packets
110 * are 1514 bytes or less, we might need to use two entries here: */
111 for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
112 dma->addr[seg] = virt_to_phys(skb->data + i);
113 dma->len[seg] = min((unsigned)(headlen - i),
114 rest_of_page(skb->data + i));
115 }
116
117 /* Now we handle the fragments: at least they're guaranteed not to go
118 * over a page. skb_shinfo(skb) returns a pointer to the structure
119 * which tells us about the number of fragments and the fragment
120 * array. */
121 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
122 const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
123 /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
124 if (seg == LGUEST_MAX_DMA_SECTIONS) {
125 /* We will end up sending a truncated packet should
126 * this ever happen. Plus, a cool log message! */
127 printk("Woah dude! Megapacket!\n");
128 break;
129 }
130 dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
131 dma->len[seg] = f->size;
132 }
133
134 /* If after all that we didn't use the entire "struct lguest_dma"
135 * array, we terminate it with a 0 length. */
136 if (seg < LGUEST_MAX_DMA_SECTIONS)
137 dma->len[seg] = 0;
138}
139
140/*
141 * Packet transmission.
142 *
143 * Our packet transmission is a little unusual. A real network card would just
144 * send out the packet and leave the receivers to decide if they're interested.
145 * Instead, we look through the network device memory page and see if any of
146 * the ethernet addresses match the packet destination, and if so we send it to
147 * that Guest.
148 *
149 * This is made a little more complicated in two cases. The first case is
150 * broadcast packets: for that we send the packet to all Guests on the network,
151 * one at a time. The second case is "promiscuous" mode, where a Guest wants
152 * to see all the packets on the network. We need a way for the Guest to tell
153 * us it wants to see all packets, so it sets the "multicast" bit on its
154 * published MAC address, which is never valid in a real ethernet address.
155 */
156#define PROMISC_BIT 0x01
157
158/* This is the callback which is summoned whenever the network device's
159 * multicast or promiscuous state changes. If the card is in promiscuous mode,
160 * we advertise that in our ethernet address in the device's memory. We do the
161 * same if Linux wants any or all multicast traffic. */
162static void lguestnet_set_multicast(struct net_device *dev)
163{
164 struct lguestnet_info *info = netdev_priv(dev);
165
166 if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count)
167 info->peer[info->me].mac[0] |= PROMISC_BIT;
168 else
169 info->peer[info->me].mac[0] &= ~PROMISC_BIT;
170}
171
172/* A simple test function to see if a peer wants to see all packets. */
173static int promisc(struct lguestnet_info *info, unsigned int peer)
174{
175 return info->peer[peer].mac[0] & PROMISC_BIT;
176}
177
178/* Another simple function to see if a peer's advertised ethernet address
179 * matches a packet's destination ethernet address. */
180static int mac_eq(const unsigned char mac[ETH_ALEN],
181 struct lguestnet_info *info, unsigned int peer)
182{
183 /* Ignore multicast bit, which peer turns on to mean promisc. */
184 if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0])
185 return 0;
186 return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
187}
188
189/* This is the function which actually sends a packet once we've decided a
190 * peer wants it: */
191static void transfer_packet(struct net_device *dev,
192 struct sk_buff *skb,
193 unsigned int peernum)
194{
195 struct lguestnet_info *info = netdev_priv(dev);
196 struct lguest_dma dma;
197
198 /* We use our handy "struct lguest_dma" packing function to prepare
199 * the skb for sending. */
200 skb_to_dma(skb, skb_headlen(skb), &dma);
201 pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);
202
203 /* This is the actual send call which copies the packet. */
204 lguest_send_dma(peer_key(info, peernum), &dma);
205
206 /* Check that the entire packet was transmitted. If not, it could mean
207 * that the other Guest registered a short receive buffer, but this
208 * driver should never do that. More likely, the peer is dead. */
209 if (dma.used_len != skb->len) {
210 dev->stats.tx_carrier_errors++;
211 pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
212 peernum, dma.used_len, skb->len,
213 (void *)dma.addr[0], dma.len[0]);
214 } else {
215 /* On success we update the stats. */
216 dev->stats.tx_bytes += skb->len;
217 dev->stats.tx_packets++;
218 }
219}
220
221/* Another helper function to tell us if a slot in the device memory is unused.
222 * Since we always set the Local Assignment bit in the ethernet address, the
223 * first byte can never be 0. */
224static int unused_peer(const struct lguest_net peer[], unsigned int num)
225{
226 return peer[num].mac[0] == 0;
227}
228
229/* Finally, here is the routine which handles an outgoing packet. It's called
230 * "start_xmit" for traditional reasons. */
231static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
232{
233 unsigned int i;
234 int broadcast;
235 struct lguestnet_info *info = netdev_priv(dev);
236 /* Extract the destination ethernet address from the packet. */
237 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
238 DECLARE_MAC_BUF(mac);
239
240 pr_debug("%s: xmit %s\n", dev->name, print_mac(mac, dest));
241
242 /* If it's a multicast packet, we broadcast to everyone. That's not
243 * very efficient, but there are very few applications which actually
244 * use multicast, which is a shame really.
245 *
246 * As etherdevice.h points out: "By definition the broadcast address is
247 * also a multicast address." So we don't have to test for broadcast
248 * packets separately. */
249 broadcast = is_multicast_ether_addr(dest);
250
251 /* Look through all the published ethernet addresses to see if we
252 * should send this packet. */
253 for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
254 /* We don't send to ourselves (we actually can't SEND_DMA to
255 * ourselves anyway), and don't send to unused slots.*/
256 if (i == info->me || unused_peer(info->peer, i))
257 continue;
258
259 /* If it's broadcast we send it. If they want every packet we
260 * send it. If the destination matches their address we send
261 * it. Otherwise we go to the next peer. */
262 if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
263 continue;
264
265 pr_debug("lguestnet %s: sending from %i to %i\n",
266 dev->name, info->me, i);
267 /* Our routine which actually does the transfer. */
268 transfer_packet(dev, skb, i);
269 }
270
271 /* An xmit routine is expected to dispose of the packet, so we do. */
272 dev_kfree_skb(skb);
273
274 /* As per kernel convention, 0 means success. This is why I love
275 * networking: even if we never sent to anyone, that's still
276 * success! */
277 return 0;
278}
279
280/*D:560
281 * Packet receiving.
282 *
283 * First, here's a helper routine which fills one of our array of receive
284 * buffers: */
285static int fill_slot(struct net_device *dev, unsigned int slot)
286{
287 struct lguestnet_info *info = netdev_priv(dev);
288
289 /* We can receive ETH_DATA_LEN (1500) byte packets, plus a standard
290 * ethernet header of ETH_HLEN (14) bytes. */
291 info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
292 if (!info->skb[slot]) {
293 printk("%s: could not fill slot %i\n", dev->name, slot);
294 return -ENOMEM;
295 }
296
297 /* skb_to_dma() is a helper which sets up the "struct lguest_dma" to
298 * point to the data in the skb: we also use it for sending out a
299 * packet. */
300 skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
301
302 /* This is a Write Memory Barrier: it ensures that the entry in the
303 * receive buffer array is written *before* we set the "used_len" entry
304 * to 0. If the Host were looking at the receive buffer array from a
305 * different CPU, it could potentially see "used_len = 0" and not see
306 * the updated receive buffer information. This would be a horribly
307 * nasty bug, so make sure the compiler and CPU know this has to happen
308 * first. */
309 wmb();
310 /* Writing 0 to "used_len" tells the Host it can use this receive
311 * buffer now. */
312 info->dma[slot].used_len = 0;
313 return 0;
314}
315
316/* This is the actual receive routine. When we receive an interrupt from the
317 * Host to tell us a packet has been delivered, we arrive here: */
318static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
319{
320 struct net_device *dev = dev_id;
321 struct lguestnet_info *info = netdev_priv(dev);
322 unsigned int i, done = 0;
323
324 /* Look through our entire receive array for an entry which has data
325 * in it. */
326 for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
327 unsigned int length;
328 struct sk_buff *skb;
329
330 length = info->dma[i].used_len;
331 if (length == 0)
332 continue;
333
334 /* We've found one! Remember the skb (we grabbed the length
335 * above), and immediately refill the slot we've taken it
336 * from. */
337 done++;
338 skb = info->skb[i];
339 fill_slot(dev, i);
340
341 /* This shouldn't happen: micropackets could be sent by a
342 * badly-behaved Guest on the network, but the Host will never
343 * stuff more data in the buffer than the buffer length. */
344 if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
345			pr_debug("%s: unbelievable skb len: %i\n",
346 dev->name, length);
347 dev_kfree_skb(skb);
348 continue;
349 }
350
351 /* skb_put(), what a great function! I've ranted about this
352 * function before (http://lkml.org/lkml/1999/9/26/24). You
353 * call it after you've added data to the end of an skb (in
354 * this case, it was the Host which wrote the data). */
355 skb_put(skb, length);
356
357 /* The ethernet header contains a protocol field: we use the
358 * standard helper to extract it, and place the result in
359 * skb->protocol. The helper also sets up skb->pkt_type and
360 * eats up the ethernet header from the front of the packet. */
361 skb->protocol = eth_type_trans(skb, dev);
362
363 /* If this device doesn't need checksums for sending, we also
364 * don't need to check the packets when they come in. */
365 if (dev->features & NETIF_F_NO_CSUM)
366 skb->ip_summed = CHECKSUM_UNNECESSARY;
367
368 /* As a last resort for debugging the driver or the lguest I/O
369 * subsystem, you can uncomment the "#define DEBUG" at the top
370 * of this file, which turns all the pr_debug() into printk()
371 * and floods the logs. */
372 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
373 ntohs(skb->protocol), skb->len, skb->pkt_type);
374
375 /* Update the packet and byte counts (visible from ifconfig,
376 * and good for debugging). */
377 dev->stats.rx_bytes += skb->len;
378 dev->stats.rx_packets++;
379
380 /* Hand our fresh network packet into the stack's "network
381 * interface receive" routine. That will free the packet
382 * itself when it's finished. */
383 netif_rx(skb);
384 }
385
386 /* If we found any packets, we assume the interrupt was for us. */
387 return done ? IRQ_HANDLED : IRQ_NONE;
388}
389
390/*D:550 This is where we start: when the device is brought up by dhcpd or
391 * ifconfig. At this point we advertise our MAC address to the rest of the
392 * network, and register receive buffers ready for incoming packets. */
393static int lguestnet_open(struct net_device *dev)
394{
395 int i;
396 struct lguestnet_info *info = netdev_priv(dev);
397
398 /* Copy our MAC address into the device page, so others on the network
399 * can find us. */
400 memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);
401
402 /* We might already be in promisc mode (dev->flags & IFF_PROMISC). Our
403 * set_multicast callback handles this already, so we call it now. */
404 lguestnet_set_multicast(dev);
405
406 /* Allocate packets and put them into our "struct lguest_dma" array.
407 * If we fail to allocate all the packets we could still limp along,
408 * but it's a sign of real stress so we should probably give up now. */
409 for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
410 if (fill_slot(dev, i) != 0)
411 goto cleanup;
412 }
413
414 /* Finally we tell the Host where our array of "struct lguest_dma"
415 * receive buffers is, binding it to the key corresponding to the
416 * device's physical memory plus our peerid. */
417 if (lguest_bind_dma(peer_key(info,info->me), info->dma,
418 NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
419 goto cleanup;
420 return 0;
421
422cleanup:
423 while (--i >= 0)
424 dev_kfree_skb(info->skb[i]);
425 return -ENOMEM;
426}
427/*:*/
428
429/* The close routine is called when the device is no longer in use: we clean up
430 * elegantly. */
431static int lguestnet_close(struct net_device *dev)
432{
433 unsigned int i;
434 struct lguestnet_info *info = netdev_priv(dev);
435
436 /* Clear all trace of our existence out of the device memory by setting
437 * the slot which held our MAC address to 0 (unused). */
438 memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));
439
440 /* Unregister our array of receive buffers */
441 lguest_unbind_dma(peer_key(info, info->me), info->dma);
442 for (i = 0; i < ARRAY_SIZE(info->dma); i++)
443 dev_kfree_skb(info->skb[i]);
444 return 0;
445}
446
447/*D:510 The network device probe function is basically a standard ethernet
448 * device setup. It reads the "struct lguest_device_desc" and sets the "struct
449 * net_device". Oh, the line-by-line excitement! Let's skip over it. :*/
450static int lguestnet_probe(struct lguest_device *lgdev)
451{
452 int err, irqf = IRQF_SHARED;
453 struct net_device *dev;
454 struct lguestnet_info *info;
455 struct lguest_device_desc *desc = &lguest_devices[lgdev->index];
456
457 pr_debug("lguest_net: probing for device %i\n", lgdev->index);
458
459 dev = alloc_etherdev(sizeof(struct lguestnet_info));
460 if (!dev)
461 return -ENOMEM;
462
463 /* Ethernet defaults with some changes */
464 ether_setup(dev);
465 dev->set_mac_address = NULL;
466
467 dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */
468 dev->dev_addr[1] = 0x00;
469 memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2);
470 dev->dev_addr[4] = 0x00;
471 dev->dev_addr[5] = 0x00;
472
473 dev->open = lguestnet_open;
474 dev->stop = lguestnet_close;
475 dev->hard_start_xmit = lguestnet_start_xmit;
476
477 /* We don't actually support multicast yet, but turning on/off
478 * promisc also calls dev->set_multicast_list. */
479 dev->set_multicast_list = lguestnet_set_multicast;
480 SET_NETDEV_DEV(dev, &lgdev->dev);
481
482	/* The network code complains if you have "scatter-gather" capability
483	 * but don't also handle checksums (it seems that would be
484 * "illogical"). So we use a lie of omission and don't tell it that we
485 * can handle scattered packets unless we also don't want checksums,
486 * even though to us they're completely independent. */
487 if (desc->features & LGUEST_NET_F_NOCSUM)
488 dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;
489
490 info = netdev_priv(dev);
491 info->mapsize = PAGE_SIZE * desc->num_pages;
492 info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT);
493 info->lgdev = lgdev;
494 info->peer = lguest_map(info->peer_phys, desc->num_pages);
495 if (!info->peer) {
496 err = -ENOMEM;
497 goto free;
498 }
499
500 /* This stores our peerid (upper bits reserved for future). */
501 info->me = (desc->features & (info->mapsize-1));
502
503 err = register_netdev(dev);
504 if (err) {
505 pr_debug("lguestnet: registering device failed\n");
506 goto unmap;
507 }
508
509 if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
510 irqf |= IRQF_SAMPLE_RANDOM;
511 if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet",
512 dev) != 0) {
513 pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev));
514 goto unregister;
515 }
516
517 pr_debug("lguestnet: registered device %s\n", dev->name);
518 /* Finally, we put the "struct net_device" in the generic "struct
519 * lguest_device"s private pointer. Again, it's not necessary, but
520 * makes sure the cool kernel kids don't tease us. */
521 lgdev->private = dev;
522 return 0;
523
524unregister:
525 unregister_netdev(dev);
526unmap:
527 lguest_unmap(info->peer);
528free:
529 free_netdev(dev);
530 return err;
531}
532
533static struct lguest_driver lguestnet_drv = {
534 .name = "lguestnet",
535 .owner = THIS_MODULE,
536 .device_type = LGUEST_DEVICE_T_NET,
537 .probe = lguestnet_probe,
538};
539
540static __init int lguestnet_init(void)
541{
542 return register_lguest_driver(&lguestnet_drv);
543}
544module_init(lguestnet_init);
545
546MODULE_DESCRIPTION("Lguest network driver");
547MODULE_LICENSE("GPL");
548
549/*D:580
550 * This is the last of the Drivers, and with this we have covered the many and
551 * wondrous and fine (and boring) details of the Guest.
552 *
553 * "make Launcher" beckons, where we answer questions like "Where do Guests
554 * come from?", and "What do you do when someone asks for optimization?"
555 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
new file mode 100644
index 000000000000..e396c9d2af8d
--- /dev/null
+++ b/drivers/net/virtio_net.c
@@ -0,0 +1,435 @@
1/* A simple network driver using virtio.
2 *
3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19//#define DEBUG
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/module.h>
23#include <linux/virtio.h>
24#include <linux/virtio_net.h>
25#include <linux/scatterlist.h>
26
27/* FIXME: MTU in config. */
28#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
29
30struct virtnet_info
31{
32 struct virtio_device *vdev;
33 struct virtqueue *rvq, *svq;
34 struct net_device *dev;
35 struct napi_struct napi;
36
37 /* Number of input buffers, and max we've ever had. */
38 unsigned int num, max;
39
40 /* Receive & send queues. */
41 struct sk_buff_head recv;
42 struct sk_buff_head send;
43};
44
45static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
46{
47 return (struct virtio_net_hdr *)skb->cb;
48}
49
50static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
51{
52 sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
53}
54
55static bool skb_xmit_done(struct virtqueue *rvq)
56{
57 struct virtnet_info *vi = rvq->vdev->priv;
58
59 /* In case we were waiting for output buffers. */
60 netif_wake_queue(vi->dev);
61 return true;
62}
63
64static void receive_skb(struct net_device *dev, struct sk_buff *skb,
65 unsigned len)
66{
67 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
68
69 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
70 pr_debug("%s: short packet %i\n", dev->name, len);
71 dev->stats.rx_length_errors++;
72 goto drop;
73 }
74 len -= sizeof(struct virtio_net_hdr);
75 BUG_ON(len > MAX_PACKET_LEN);
76
77 skb_trim(skb, len);
78 skb->protocol = eth_type_trans(skb, dev);
79 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
80 ntohs(skb->protocol), skb->len, skb->pkt_type);
81 dev->stats.rx_bytes += skb->len;
82 dev->stats.rx_packets++;
83
84 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
85 pr_debug("Needs csum!\n");
86 skb->ip_summed = CHECKSUM_PARTIAL;
87 skb->csum_start = hdr->csum_start;
88 skb->csum_offset = hdr->csum_offset;
89 if (skb->csum_start > skb->len - 2
90 || skb->csum_offset > skb->len - 2) {
91 if (net_ratelimit())
92 printk(KERN_WARNING "%s: csum=%u/%u len=%u\n",
93 dev->name, skb->csum_start,
94 skb->csum_offset, skb->len);
95 goto frame_err;
96 }
97 }
98
99 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
100 pr_debug("GSO!\n");
101 switch (hdr->gso_type) {
102 case VIRTIO_NET_HDR_GSO_TCPV4:
103 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
104 break;
105 case VIRTIO_NET_HDR_GSO_TCPV4_ECN:
106 skb_shinfo(skb)->gso_type = SKB_GSO_TCP_ECN;
107 break;
108 case VIRTIO_NET_HDR_GSO_UDP:
109 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
110 break;
111 case VIRTIO_NET_HDR_GSO_TCPV6:
112 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
113 break;
114 default:
115 if (net_ratelimit())
116 printk(KERN_WARNING "%s: bad gso type %u.\n",
117 dev->name, hdr->gso_type);
118 goto frame_err;
119 }
120
121 skb_shinfo(skb)->gso_size = hdr->gso_size;
122 if (skb_shinfo(skb)->gso_size == 0) {
123 if (net_ratelimit())
124 printk(KERN_WARNING "%s: zero gso size.\n",
125 dev->name);
126 goto frame_err;
127 }
128
129 /* Header must be checked, and gso_segs computed. */
130 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
131 skb_shinfo(skb)->gso_segs = 0;
132 }
133
134 netif_receive_skb(skb);
135 return;
136
137frame_err:
138 dev->stats.rx_frame_errors++;
139drop:
140 dev_kfree_skb(skb);
141}
142
143static void try_fill_recv(struct virtnet_info *vi)
144{
145 struct sk_buff *skb;
146 struct scatterlist sg[1+MAX_SKB_FRAGS];
147 int num, err;
148
149 for (;;) {
150 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
151 if (unlikely(!skb))
152 break;
153
154 skb_put(skb, MAX_PACKET_LEN);
155 vnet_hdr_to_sg(sg, skb);
156 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
157 skb_queue_head(&vi->recv, skb);
158
159 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
160 if (err) {
161 skb_unlink(skb, &vi->recv);
162 kfree_skb(skb);
163 break;
164 }
165 vi->num++;
166 }
167 if (unlikely(vi->num > vi->max))
168 vi->max = vi->num;
169 vi->rvq->vq_ops->kick(vi->rvq);
170}
171
172static bool skb_recv_done(struct virtqueue *rvq)
173{
174 struct virtnet_info *vi = rvq->vdev->priv;
175 netif_rx_schedule(vi->dev, &vi->napi);
176 /* Suppress further interrupts. */
177 return false;
178}
179
180static int virtnet_poll(struct napi_struct *napi, int budget)
181{
182 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
183 struct sk_buff *skb = NULL;
184 unsigned int len, received = 0;
185
186again:
187 while (received < budget &&
188 (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
189 __skb_unlink(skb, &vi->recv);
190 receive_skb(vi->dev, skb, len);
191 vi->num--;
192 received++;
193 }
194
195 /* FIXME: If we oom and completely run out of inbufs, we need
196 * to start a timer trying to fill more. */
197 if (vi->num < vi->max / 2)
198 try_fill_recv(vi);
199
200 /* All done? */
201 if (!skb) {
202 netif_rx_complete(vi->dev, napi);
203 if (unlikely(!vi->rvq->vq_ops->restart(vi->rvq))
204 && netif_rx_reschedule(vi->dev, napi))
205 goto again;
206 }
207
208 return received;
209}
210
211static void free_old_xmit_skbs(struct virtnet_info *vi)
212{
213 struct sk_buff *skb;
214 unsigned int len;
215
216 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
217 pr_debug("Sent skb %p\n", skb);
218 __skb_unlink(skb, &vi->send);
219 vi->dev->stats.tx_bytes += len;
220 vi->dev->stats.tx_packets++;
221 kfree_skb(skb);
222 }
223}
224
225static int start_xmit(struct sk_buff *skb, struct net_device *dev)
226{
227 struct virtnet_info *vi = netdev_priv(dev);
228 int num, err;
229 struct scatterlist sg[1+MAX_SKB_FRAGS];
230 struct virtio_net_hdr *hdr;
231 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
232 DECLARE_MAC_BUF(mac);
233
234 pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));
235
236 free_old_xmit_skbs(vi);
237
238 /* Encode metadata header at front. */
239 hdr = skb_vnet_hdr(skb);
240 if (skb->ip_summed == CHECKSUM_PARTIAL) {
241 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
242 hdr->csum_start = skb->csum_start - skb_headroom(skb);
243 hdr->csum_offset = skb->csum_offset;
244 } else {
245 hdr->flags = 0;
246 hdr->csum_offset = hdr->csum_start = 0;
247 }
248
249 if (skb_is_gso(skb)) {
250 hdr->gso_size = skb_shinfo(skb)->gso_size;
251 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
252 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
253 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
254 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
255 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
256 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
257 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
258 hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
259 else
260 BUG();
261 } else {
262 hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
263 hdr->gso_size = 0;
264 }
265
266 vnet_hdr_to_sg(sg, skb);
267 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
268 __skb_queue_head(&vi->send, skb);
269 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
270 if (err) {
271 pr_debug("%s: virtio not prepared to send\n", dev->name);
272 skb_unlink(skb, &vi->send);
273 netif_stop_queue(dev);
274 return NETDEV_TX_BUSY;
275 }
276 vi->svq->vq_ops->kick(vi->svq);
277
278 return 0;
279}
280
281static int virtnet_open(struct net_device *dev)
282{
283 struct virtnet_info *vi = netdev_priv(dev);
284
285 try_fill_recv(vi);
286
287 /* If we didn't even get one input buffer, we're useless. */
288 if (vi->num == 0)
289 return -ENOMEM;
290
291 napi_enable(&vi->napi);
292 return 0;
293}
294
295static int virtnet_close(struct net_device *dev)
296{
297 struct virtnet_info *vi = netdev_priv(dev);
298 struct sk_buff *skb;
299
300 napi_disable(&vi->napi);
301
302 /* networking core has neutered skb_xmit_done/skb_recv_done, so don't
303 * worry about races vs. get(). */
304 vi->rvq->vq_ops->shutdown(vi->rvq);
305 while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
306 kfree_skb(skb);
307 vi->num--;
308 }
309 vi->svq->vq_ops->shutdown(vi->svq);
310 while ((skb = __skb_dequeue(&vi->send)) != NULL)
311 kfree_skb(skb);
312
313 BUG_ON(vi->num != 0);
314 return 0;
315}
316
317static int virtnet_probe(struct virtio_device *vdev)
318{
319 int err;
320 unsigned int len;
321 struct net_device *dev;
322 struct virtnet_info *vi;
323 void *token;
324
325 /* Allocate ourselves a network device with room for our info */
326 dev = alloc_etherdev(sizeof(struct virtnet_info));
327 if (!dev)
328 return -ENOMEM;
329
330 /* Set up network device as normal. */
331 ether_setup(dev);
332 dev->open = virtnet_open;
333 dev->stop = virtnet_close;
334 dev->hard_start_xmit = start_xmit;
335 dev->features = NETIF_F_HIGHDMA;
336 SET_NETDEV_DEV(dev, &vdev->dev);
337
338 /* Do we support "hardware" checksums? */
339 token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_F, &len);
340 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_NO_CSUM)) {
341 /* This opens up the world of extra features. */
342 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
343 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4))
344 dev->features |= NETIF_F_TSO;
345 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_UFO))
346 dev->features |= NETIF_F_UFO;
347 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO4_ECN))
348 dev->features |= NETIF_F_TSO_ECN;
349 if (virtio_use_bit(vdev, token, len, VIRTIO_NET_F_TSO6))
350 dev->features |= NETIF_F_TSO6;
351 }
352
353 /* Configuration may specify what MAC to use. Otherwise random. */
354 token = vdev->config->find(vdev, VIRTIO_CONFIG_NET_MAC_F, &len);
355 if (token) {
356 dev->addr_len = len;
357 vdev->config->get(vdev, token, dev->dev_addr, len);
358 } else
359 random_ether_addr(dev->dev_addr);
360
361 /* Set up our device-specific information */
362 vi = netdev_priv(dev);
363 netif_napi_add(dev, &vi->napi, virtnet_poll, 16);
364 vi->dev = dev;
365 vi->vdev = vdev;
366
367 /* We expect two virtqueues, receive then send. */
368 vi->rvq = vdev->config->find_vq(vdev, skb_recv_done);
369 if (IS_ERR(vi->rvq)) {
370 err = PTR_ERR(vi->rvq);
371 goto free;
372 }
373
374 vi->svq = vdev->config->find_vq(vdev, skb_xmit_done);
375 if (IS_ERR(vi->svq)) {
376 err = PTR_ERR(vi->svq);
377 goto free_recv;
378 }
379
380 /* Initialize our empty receive and send queues. */
381 skb_queue_head_init(&vi->recv);
382 skb_queue_head_init(&vi->send);
383
384 err = register_netdev(dev);
385 if (err) {
386 pr_debug("virtio_net: registering device failed\n");
387 goto free_send;
388 }
389 pr_debug("virtnet: registered device %s\n", dev->name);
390 vdev->priv = vi;
391 return 0;
392
393free_send:
394 vdev->config->del_vq(vi->svq);
395free_recv:
396 vdev->config->del_vq(vi->rvq);
397free:
398 free_netdev(dev);
399 return err;
400}
401
402static void virtnet_remove(struct virtio_device *vdev)
403{
404	struct virtnet_info *vi = vdev->priv;
405	unregister_netdev(vi->dev);
406	free_netdev(vi->dev);
407}

408static struct virtio_device_id id_table[] = {
409 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
410 { 0 },
411};
412
413static struct virtio_driver virtio_net = {
414 .driver.name = KBUILD_MODNAME,
415 .driver.owner = THIS_MODULE,
416 .id_table = id_table,
417 .probe = virtnet_probe,
418 .remove = __devexit_p(virtnet_remove),
419};
420
421static int __init init(void)
422{
423 return register_virtio_driver(&virtio_net);
424}
425
426static void __exit fini(void)
427{
428 unregister_virtio_driver(&virtio_net);
429}
430module_init(init);
431module_exit(fini);
432
433MODULE_DEVICE_TABLE(virtio, id_table);
434MODULE_DESCRIPTION("Virtio network driver");
435MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
new file mode 100644
index 000000000000..9e33fc4da875
--- /dev/null
+++ b/drivers/virtio/Kconfig
@@ -0,0 +1,8 @@
1# Virtio always gets selected by whoever wants it.
2config VIRTIO
3 bool
4
5# Similarly the virtio ring implementation.
6config VIRTIO_RING
7 bool
8 depends on VIRTIO
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
new file mode 100644
index 000000000000..f70e40971dd9
--- /dev/null
+++ b/drivers/virtio/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_VIRTIO) += virtio.o
2obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c
new file mode 100644
index 000000000000..983d482fba40
--- /dev/null
+++ b/drivers/virtio/config.c
@@ -0,0 +1,13 @@
1/* Configuration space parsing helpers for virtio.
2 *
3 * The configuration is [type][len][... len bytes ...] fields.
4 *
5 * Copyright 2007 Rusty Russell, IBM Corporation.
6 * GPL v2 or later.
7 */
8#include <linux/err.h>
9#include <linux/virtio.h>
10#include <linux/virtio_config.h>
11#include <linux/bug.h>
12#include <asm/system.h>
13
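/* The find()/get() hooks which walk this format belong to each bus
 * backend; a hedged sketch of the traversal over a raw byte buffer
 * (example_find() and its arguments are illustrative, not this patch's
 * API): */
static u8 *example_find(u8 *config, unsigned int config_len,
			u8 type, unsigned int *len)
{
	unsigned int i;

	/* Each field is a type byte, a length byte, then "len" bytes. */
	for (i = 0; i + 2 <= config_len; i += 2 + config[i + 1]) {
		if (config[i] == type) {
			*len = config[i + 1];
			return config + i + 2;
		}
	}
	return NULL;
}
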
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
new file mode 100644
index 000000000000..15d7787dea87
--- /dev/null
+++ b/drivers/virtio/virtio.c
@@ -0,0 +1,189 @@
1#include <linux/virtio.h>
2#include <linux/spinlock.h>
3#include <linux/virtio_config.h>
4
5static ssize_t device_show(struct device *_d,
6 struct device_attribute *attr, char *buf)
7{
8 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
9 return sprintf(buf, "%hu", dev->id.device);
10}
11static ssize_t vendor_show(struct device *_d,
12 struct device_attribute *attr, char *buf)
13{
14 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
15 return sprintf(buf, "%hu", dev->id.vendor);
16}
17static ssize_t status_show(struct device *_d,
18 struct device_attribute *attr, char *buf)
19{
20 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
21 return sprintf(buf, "0x%08x", dev->config->get_status(dev));
22}
23static ssize_t modalias_show(struct device *_d,
24 struct device_attribute *attr, char *buf)
25{
26 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
27
28 return sprintf(buf, "virtio:d%08Xv%08X\n",
29 dev->id.device, dev->id.vendor);
30}
31static struct device_attribute virtio_dev_attrs[] = {
32 __ATTR_RO(device),
33 __ATTR_RO(vendor),
34 __ATTR_RO(status),
35 __ATTR_RO(modalias),
36 __ATTR_NULL
37};
38
39static inline int virtio_id_match(const struct virtio_device *dev,
40 const struct virtio_device_id *id)
41{
42 if (id->device != dev->id.device)
43 return 0;
44
45	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
46}
47
48/* This looks through all the IDs a driver claims to support. If any of them
49 * match, we return 1 and the kernel will call virtio_dev_probe(). */
50static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
51{
52 unsigned int i;
53 struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
54 const struct virtio_device_id *ids;
55
56 ids = container_of(_dr, struct virtio_driver, driver)->id_table;
57 for (i = 0; ids[i].device; i++)
58 if (virtio_id_match(dev, &ids[i]))
59 return 1;
60 return 0;
61}
62
63static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
64{
65 struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
66
67 return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
68 dev->id.device, dev->id.vendor);
69}
70
71static struct bus_type virtio_bus = {
72 .name = "virtio",
73 .match = virtio_dev_match,
74 .dev_attrs = virtio_dev_attrs,
75 .uevent = virtio_uevent,
76};
77
78static void add_status(struct virtio_device *dev, unsigned status)
79{
80 dev->config->set_status(dev, dev->config->get_status(dev) | status);
81}
82
83static int virtio_dev_probe(struct device *_d)
84{
85 int err;
86 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
87 struct virtio_driver *drv = container_of(dev->dev.driver,
88 struct virtio_driver, driver);
89
90 add_status(dev, VIRTIO_CONFIG_S_DRIVER);
91 err = drv->probe(dev);
92 if (err)
93 add_status(dev, VIRTIO_CONFIG_S_FAILED);
94 else
95 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
96 return err;
97}
98
99int register_virtio_driver(struct virtio_driver *driver)
100{
101 driver->driver.bus = &virtio_bus;
102 driver->driver.probe = virtio_dev_probe;
103 return driver_register(&driver->driver);
104}
105EXPORT_SYMBOL_GPL(register_virtio_driver);
106
107void unregister_virtio_driver(struct virtio_driver *driver)
108{
109 driver_unregister(&driver->driver);
110}
111EXPORT_SYMBOL_GPL(unregister_virtio_driver);
112
113int register_virtio_device(struct virtio_device *dev)
114{
115 int err;
116
117 dev->dev.bus = &virtio_bus;
118 sprintf(dev->dev.bus_id, "%u", dev->index);
119
120 /* Acknowledge that we've seen the device. */
121 add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
122
123 /* device_register() causes the bus infrastructure to look for a
124 * matching driver. */
125 err = device_register(&dev->dev);
126 if (err)
127 add_status(dev, VIRTIO_CONFIG_S_FAILED);
128 return err;
129}
130EXPORT_SYMBOL_GPL(register_virtio_device);
131
132void unregister_virtio_device(struct virtio_device *dev)
133{
134 device_unregister(&dev->dev);
135}
136EXPORT_SYMBOL_GPL(unregister_virtio_device);
137
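/* A bus backend (such as the lguest_device.c added by this merge)
 * hands devices over roughly like this; a hedged sketch, with
 * example_config_ops standing in for a real set of virtio_config_ops:
 */
static int example_add_device(unsigned int index, u32 device_id)
{
	struct virtio_device *vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	int err;

	if (!vdev)
		return -ENOMEM;
	vdev->index = index;
	vdev->id.device = device_id;
	vdev->config = &example_config_ops;
	err = register_virtio_device(vdev);
	if (err)
		kfree(vdev);
	return err;
}
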
138int __virtio_config_val(struct virtio_device *vdev,
139 u8 type, void *val, size_t size)
140{
141 void *token;
142 unsigned int len;
143
144 token = vdev->config->find(vdev, type, &len);
145 if (!token)
146 return -ENOENT;
147
148 if (len != size)
149 return -EIO;
150
151 vdev->config->get(vdev, token, val, size);
152 return 0;
153}
154EXPORT_SYMBOL_GPL(__virtio_config_val);
155
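/* Typical use of the helper above: read one fixed-size field, fall
 * back if it is absent or malformed.  VIRTIO_CONFIG_EXAMPLE_F and the
 * 16-bit width are made up for illustration: */
static u16 example_queue_size(struct virtio_device *vdev)
{
	u16 size;

	if (__virtio_config_val(vdev, VIRTIO_CONFIG_EXAMPLE_F,
				&size, sizeof(size)))
		size = 256;	/* -ENOENT or -EIO: use a default */
	return size;
}
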
156int virtio_use_bit(struct virtio_device *vdev,
157 void *token, unsigned int len, unsigned int bitnum)
158{
159 unsigned long bits[16];
160
161	/* This makes it convenient to pass through find() results. */
162 if (!token)
163 return 0;
164
165 /* bit not in range of this bitfield? */
166	if (bitnum >= len * 8 / 2)
167 return 0;
168
169 /* Giant feature bitfields are silly. */
170 BUG_ON(len > sizeof(bits));
171 vdev->config->get(vdev, token, bits, len);
172
173 if (!test_bit(bitnum, bits))
174 return 0;
175
176 /* Set acknowledge bit, and write it back. */
177 set_bit(bitnum + len * 8 / 2, bits);
178 vdev->config->set(vdev, token, bits, len);
179 return 1;
180}
181EXPORT_SYMBOL_GPL(virtio_use_bit);
182
183static int virtio_init(void)
184{
185 if (bus_register(&virtio_bus) != 0)
186 panic("virtio bus registration failed");
187 return 0;
188}
189core_initcall(virtio_init);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
new file mode 100644
index 000000000000..0e4baca21b8f
--- /dev/null
+++ b/drivers/virtio/virtio_ring.c
@@ -0,0 +1,313 @@
1/* Virtio ring implementation.
2 *
3 * Copyright 2007 Rusty Russell IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#include <linux/virtio.h>
20#include <linux/virtio_ring.h>
21#include <linux/device.h>
22
23#ifdef DEBUG
24/* For development, we want to crash whenever the ring is screwed. */
25#define BAD_RING(vq, fmt...) \
26 do { dev_err(&vq->vq.vdev->dev, fmt); BUG(); } while(0)
27#define START_USE(vq) \
28 do { if ((vq)->in_use) panic("in_use = %i\n", (vq)->in_use); (vq)->in_use = __LINE__; mb(); } while(0)
29#define END_USE(vq) \
30 do { BUG_ON(!(vq)->in_use); (vq)->in_use = 0; mb(); } while(0)
31#else
32#define BAD_RING(vq, fmt...) \
33 do { dev_err(&vq->vq.vdev->dev, fmt); (vq)->broken = true; } while(0)
34#define START_USE(vq)
35#define END_USE(vq)
36#endif
37
38struct vring_virtqueue
39{
40 struct virtqueue vq;
41
42 /* Actual memory layout for this queue */
43 struct vring vring;
44
45 /* Other side has made a mess, don't try any more. */
46 bool broken;
47
48 /* Number of free buffers */
49 unsigned int num_free;
50 /* Head of free buffer list. */
51 unsigned int free_head;
52 /* Number we've added since last sync. */
53 unsigned int num_added;
54
55 /* Last used index we've seen. */
56 unsigned int last_used_idx;
57
58 /* How to notify other side. FIXME: commonalize hcalls! */
59 void (*notify)(struct virtqueue *vq);
60
61#ifdef DEBUG
62 /* They're supposed to lock for us. */
63 unsigned int in_use;
64#endif
65
66 /* Tokens for callbacks. */
67 void *data[];
68};
69
70#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
71
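/* The "struct vring" above expects the caller's pages laid out as the
 * descriptor table, then the available ring, then (page-aligned) the
 * used ring.  A sketch mirroring the vring_init() helper in
 * linux/virtio_ring.h; treat it as an illustration of the layout, not
 * a second definition: */
static inline void example_vring_init(struct vring *vr, unsigned int num,
				      void *p)
{
	vr->num = num;
	vr->desc = p;					 /* descriptor table */
	vr->avail = p + num * sizeof(struct vring_desc); /* available ring */
	vr->used = (void *)(((unsigned long)&vr->avail->ring[num]
			     + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
}
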
72static int vring_add_buf(struct virtqueue *_vq,
73 struct scatterlist sg[],
74 unsigned int out,
75 unsigned int in,
76 void *data)
77{
78 struct vring_virtqueue *vq = to_vvq(_vq);
79 unsigned int i, avail, head, uninitialized_var(prev);
80
81 BUG_ON(data == NULL);
82 BUG_ON(out + in > vq->vring.num);
83 BUG_ON(out + in == 0);
84
85 START_USE(vq);
86
87 if (vq->num_free < out + in) {
88 pr_debug("Can't add buf len %i - avail = %i\n",
89 out + in, vq->num_free);
90 END_USE(vq);
91 return -ENOSPC;
92 }
93
94 /* We're about to use some buffers from the free list. */
95 vq->num_free -= out + in;
96
97 head = vq->free_head;
98 for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
99 vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
100 vq->vring.desc[i].addr = (page_to_pfn(sg_page(sg))<<PAGE_SHIFT)
101 + sg->offset;
102 vq->vring.desc[i].len = sg->length;
103 prev = i;
104 sg++;
105 }
106 for (; in; i = vq->vring.desc[i].next, in--) {
107 vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
108 vq->vring.desc[i].addr = (page_to_pfn(sg_page(sg))<<PAGE_SHIFT)
109 + sg->offset;
110 vq->vring.desc[i].len = sg->length;
111 prev = i;
112 sg++;
113 }
114 /* Last one doesn't continue. */
115 vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;
116
117 /* Update free pointer */
118 vq->free_head = i;
119
120 /* Set token. */
121 vq->data[head] = data;
122
123 /* Put entry in available array (but don't update avail->idx until they
124 * do sync). FIXME: avoid modulus here? */
125 avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
126 vq->vring.avail->ring[avail] = head;
127
128 pr_debug("Added buffer head %i to %p\n", head, vq);
129 END_USE(vq);
130 return 0;
131}
132
133static void vring_kick(struct virtqueue *_vq)
134{
135 struct vring_virtqueue *vq = to_vvq(_vq);
136 START_USE(vq);
137 /* Descriptors and available array need to be set before we expose the
138 * new available array entries. */
139 wmb();
140
141 vq->vring.avail->idx += vq->num_added;
142 vq->num_added = 0;
143
144 /* Need to update avail index before checking if we should notify */
145 mb();
146
147 if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
148 /* Prod other side to tell it about changes. */
149 vq->notify(&vq->vq);
150
151 END_USE(vq);
152}
153
154static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
155{
156 unsigned int i;
157
158 /* Clear data ptr. */
159 vq->data[head] = NULL;
160
161 /* Put back on free list: find end */
162 i = head;
163 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
164 i = vq->vring.desc[i].next;
165 vq->num_free++;
166 }
167
168 vq->vring.desc[i].next = vq->free_head;
169 vq->free_head = head;
170 /* Plus final descriptor */
171 vq->num_free++;
172}
173
174/* FIXME: We need to tell other side about removal, to synchronize. */
175static void vring_shutdown(struct virtqueue *_vq)
176{
177 struct vring_virtqueue *vq = to_vvq(_vq);
178 unsigned int i;
179
180 for (i = 0; i < vq->vring.num; i++)
181 detach_buf(vq, i);
182}
183
184static inline bool more_used(const struct vring_virtqueue *vq)
185{
186 return vq->last_used_idx != vq->vring.used->idx;
187}
188
189static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
190{
191 struct vring_virtqueue *vq = to_vvq(_vq);
192 void *ret;
193 unsigned int i;
194
195 START_USE(vq);
196
197 if (!more_used(vq)) {
198 pr_debug("No more buffers in queue\n");
199 END_USE(vq);
200 return NULL;
201 }
202
203 i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
204 *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
205
206 if (unlikely(i >= vq->vring.num)) {
207 BAD_RING(vq, "id %u out of range\n", i);
208 return NULL;
209 }
210 if (unlikely(!vq->data[i])) {
211 BAD_RING(vq, "id %u is not a head!\n", i);
212 return NULL;
213 }
214
215 /* detach_buf clears data, so grab it now. */
216 ret = vq->data[i];
217 detach_buf(vq, i);
218 vq->last_used_idx++;
219 END_USE(vq);
220 return ret;
221}
222
223static bool vring_restart(struct virtqueue *_vq)
224{
225 struct vring_virtqueue *vq = to_vvq(_vq);
226
227 START_USE(vq);
228 BUG_ON(!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT));
229
230 /* We optimistically turn back on interrupts, then check if there was
231 * more to do. */
232 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
233 mb();
234 if (unlikely(more_used(vq))) {
235 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
236 END_USE(vq);
237 return false;
238 }
239
240 END_USE(vq);
241 return true;
242}
243
244irqreturn_t vring_interrupt(int irq, void *_vq)
245{
246 struct vring_virtqueue *vq = to_vvq(_vq);
247
248 if (!more_used(vq)) {
249 pr_debug("virtqueue interrupt with no work for %p\n", vq);
250 return IRQ_NONE;
251 }
252
253 if (unlikely(vq->broken))
254 return IRQ_HANDLED;
255
256 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
257 if (vq->vq.callback && !vq->vq.callback(&vq->vq))
258 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
259
260 return IRQ_HANDLED;
261}
262
263static struct virtqueue_ops vring_vq_ops = {
264 .add_buf = vring_add_buf,
265 .get_buf = vring_get_buf,
266 .kick = vring_kick,
267 .restart = vring_restart,
268 .shutdown = vring_shutdown,
269};
270
271struct virtqueue *vring_new_virtqueue(unsigned int num,
272 struct virtio_device *vdev,
273 void *pages,
274 void (*notify)(struct virtqueue *),
275 bool (*callback)(struct virtqueue *))
276{
277 struct vring_virtqueue *vq;
278 unsigned int i;
279
280 vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
281 if (!vq)
282 return NULL;
283
284 vring_init(&vq->vring, num, pages);
285 vq->vq.callback = callback;
286 vq->vq.vdev = vdev;
287 vq->vq.vq_ops = &vring_vq_ops;
288 vq->notify = notify;
289 vq->broken = false;
290 vq->last_used_idx = 0;
291 vq->num_added = 0;
292#ifdef DEBUG
293 vq->in_use = false;
294#endif
295
296 /* No callback? Tell other side not to bother us. */
297 if (!callback)
298 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
299
300 /* Put everything in free lists. */
301 vq->num_free = num;
302 vq->free_head = 0;
303 for (i = 0; i < num-1; i++)
304 vq->vring.desc[i].next = i+1;
305
306 return &vq->vq;
307}
308
309void vring_del_virtqueue(struct virtqueue *vq)
310{
311 kfree(to_vvq(vq));
312}
313