 Documentation/lguest/lguest.c      | 66
 arch/s390/include/asm/kvm_virtio.h |  4
 arch/x86/lguest/i386_head.S        | 15
 drivers/block/virtio_blk.c         | 41
 drivers/char/hvc_console.c         |  1
 drivers/char/virtio_console.c      | 30
 drivers/lguest/lg.h                |  2
 drivers/lguest/lguest_device.c     |  8
 drivers/lguest/lguest_user.c       | 13
 drivers/lguest/page_tables.c       | 72
 drivers/s390/kvm/kvm_virtio.c      | 34
 drivers/virtio/virtio.c            |  2
 drivers/virtio/virtio_balloon.c    | 13
 drivers/virtio/virtio_pci.c        | 43
 drivers/virtio/virtio_ring.c       |  3
 include/linux/lguest_launcher.h    |  6
 include/linux/virtio_balloon.h     |  3
 include/linux/virtio_console.h     | 11
 include/linux/virtio_pci.h         |  8
 include/linux/virtio_ring.h        | 13
 20 files changed, 253 insertions(+), 135 deletions(-)
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 804520633fc..f2dbbf3bdea 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -481,51 +481,6 @@ static unsigned long load_initrd(const char *name, unsigned long mem) | |||
481 | /* We return the initrd size. */ | 481 | /* We return the initrd size. */ |
482 | return len; | 482 | return len; |
483 | } | 483 | } |
484 | |||
485 | /* Once we know how much memory we have we can construct simple linear page | ||
486 | * tables which set virtual == physical which will get the Guest far enough | ||
487 | * into the boot to create its own. | ||
488 | * | ||
489 | * We lay them out of the way, just below the initrd (which is why we need to | ||
490 | * know its size here). */ | ||
491 | static unsigned long setup_pagetables(unsigned long mem, | ||
492 | unsigned long initrd_size) | ||
493 | { | ||
494 | unsigned long *pgdir, *linear; | ||
495 | unsigned int mapped_pages, i, linear_pages; | ||
496 | unsigned int ptes_per_page = getpagesize()/sizeof(void *); | ||
497 | |||
498 | mapped_pages = mem/getpagesize(); | ||
499 | |||
500 | /* Each PTE page can map ptes_per_page pages: how many do we need? */ | ||
501 | linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page; | ||
502 | |||
503 | /* We put the toplevel page directory page at the top of memory. */ | ||
504 | pgdir = from_guest_phys(mem) - initrd_size - getpagesize(); | ||
505 | |||
506 | /* Now we use the next linear_pages pages as pte pages */ | ||
507 | linear = (void *)pgdir - linear_pages*getpagesize(); | ||
508 | |||
509 | /* Linear mapping is easy: put every page's address into the mapping in | ||
510 | * order. PAGE_PRESENT contains the flags Present, Writable and | ||
511 | * Executable. */ | ||
512 | for (i = 0; i < mapped_pages; i++) | ||
513 | linear[i] = ((i * getpagesize()) | PAGE_PRESENT); | ||
514 | |||
515 | /* The top level points to the linear page table pages above. */ | ||
516 | for (i = 0; i < mapped_pages; i += ptes_per_page) { | ||
517 | pgdir[i/ptes_per_page] | ||
518 | = ((to_guest_phys(linear) + i*sizeof(void *)) | ||
519 | | PAGE_PRESENT); | ||
520 | } | ||
521 | |||
522 | verbose("Linear mapping of %u pages in %u pte pages at %#lx\n", | ||
523 | mapped_pages, linear_pages, to_guest_phys(linear)); | ||
524 | |||
525 | /* We return the top level (guest-physical) address: the kernel needs | ||
526 | * to know where it is. */ | ||
527 | return to_guest_phys(pgdir); | ||
528 | } | ||
529 | /*:*/ | 484 | /*:*/ |
530 | 485 | ||
531 | /* Simple routine to roll all the commandline arguments together with spaces | 486 | /* Simple routine to roll all the commandline arguments together with spaces |
@@ -548,13 +503,13 @@ static void concat(char *dst, char *args[]) | |||
548 | 503 | ||
549 | /*L:185 This is where we actually tell the kernel to initialize the Guest. We | 504 | /*L:185 This is where we actually tell the kernel to initialize the Guest. We |
550 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: | 505 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: |
551 | * the base of Guest "physical" memory, the top physical page to allow, the | 506 | * the base of Guest "physical" memory, the top physical page to allow and the |
552 | * top level pagetable and the entry point for the Guest. */ | 507 | * entry point for the Guest. */ |
553 | static int tell_kernel(unsigned long pgdir, unsigned long start) | 508 | static int tell_kernel(unsigned long start) |
554 | { | 509 | { |
555 | unsigned long args[] = { LHREQ_INITIALIZE, | 510 | unsigned long args[] = { LHREQ_INITIALIZE, |
556 | (unsigned long)guest_base, | 511 | (unsigned long)guest_base, |
557 | guest_limit / getpagesize(), pgdir, start }; | 512 | guest_limit / getpagesize(), start }; |
558 | int fd; | 513 | int fd; |
559 | 514 | ||
560 | verbose("Guest: %p - %p (%#lx)\n", | 515 | verbose("Guest: %p - %p (%#lx)\n", |
@@ -1030,7 +985,7 @@ static void update_device_status(struct device *dev) | |||
1030 | /* Zero out the virtqueues. */ | 985 | /* Zero out the virtqueues. */ |
1031 | for (vq = dev->vq; vq; vq = vq->next) { | 986 | for (vq = dev->vq; vq; vq = vq->next) { |
1032 | memset(vq->vring.desc, 0, | 987 | memset(vq->vring.desc, 0, |
1033 | vring_size(vq->config.num, getpagesize())); | 988 | vring_size(vq->config.num, LGUEST_VRING_ALIGN)); |
1034 | lg_last_avail(vq) = 0; | 989 | lg_last_avail(vq) = 0; |
1035 | } | 990 | } |
1036 | } else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { | 991 | } else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { |
@@ -1211,7 +1166,7 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1211 | void *p; | 1166 | void *p; |
1212 | 1167 | ||
1213 | /* First we need some memory for this virtqueue. */ | 1168 | /* First we need some memory for this virtqueue. */ |
1214 | pages = (vring_size(num_descs, getpagesize()) + getpagesize() - 1) | 1169 | pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1) |
1215 | / getpagesize(); | 1170 | / getpagesize(); |
1216 | p = get_pages(pages); | 1171 | p = get_pages(pages); |
1217 | 1172 | ||
@@ -1228,7 +1183,7 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1228 | vq->config.pfn = to_guest_phys(p) / getpagesize(); | 1183 | vq->config.pfn = to_guest_phys(p) / getpagesize(); |
1229 | 1184 | ||
1230 | /* Initialize the vring. */ | 1185 | /* Initialize the vring. */ |
1231 | vring_init(&vq->vring, num_descs, p, getpagesize()); | 1186 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); |
1232 | 1187 | ||
1233 | /* Append virtqueue to this device's descriptor. We use | 1188 | /* Append virtqueue to this device's descriptor. We use |
1234 | * device_config() to get the end of the device's current virtqueues; | 1189 | * device_config() to get the end of the device's current virtqueues; |
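
For a sense of the numbers involved, a minimal sketch of the sizing done in add_virtqueue() above, assuming a hypothetical 256-entry ring and 4096-byte pages (not taken from the patch itself):

/* Sketch only: add_virtqueue()'s page arithmetic for num_descs = 256.
 * vring_size(256, LGUEST_VRING_ALIGN) is 10244 bytes, so the ring needs
 * three guest pages, and vq->config.pfn is simply the ring's
 * guest-physical address divided by the page size. */
static unsigned int ring_pages(unsigned int num_descs, unsigned int page_size)
{
	return (vring_size(num_descs, LGUEST_VRING_ALIGN) + page_size - 1)
		/ page_size;
}
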
@@ -1941,7 +1896,7 @@ int main(int argc, char *argv[]) | |||
1941 | { | 1896 | { |
1942 | /* Memory, top-level pagetable, code startpoint and size of the | 1897 | /* Memory, top-level pagetable, code startpoint and size of the |
1943 | * (optional) initrd. */ | 1898 | * (optional) initrd. */ |
1944 | unsigned long mem = 0, pgdir, start, initrd_size = 0; | 1899 | unsigned long mem = 0, start, initrd_size = 0; |
1945 | /* Two temporaries and the /dev/lguest file descriptor. */ | 1900 | /* Two temporaries and the /dev/lguest file descriptor. */ |
1946 | int i, c, lguest_fd; | 1901 | int i, c, lguest_fd; |
1947 | /* The boot information for the Guest. */ | 1902 | /* The boot information for the Guest. */ |
@@ -2040,9 +1995,6 @@ int main(int argc, char *argv[]) | |||
2040 | boot->hdr.type_of_loader = 0xFF; | 1995 | boot->hdr.type_of_loader = 0xFF; |
2041 | } | 1996 | } |
2042 | 1997 | ||
2043 | /* Set up the initial linear pagetables, starting below the initrd. */ | ||
2044 | pgdir = setup_pagetables(mem, initrd_size); | ||
2045 | |||
2046 | /* The Linux boot header contains an "E820" memory map: ours is a | 1998 | /* The Linux boot header contains an "E820" memory map: ours is a |
2047 | * simple, single region. */ | 1999 | * simple, single region. */ |
2048 | boot->e820_entries = 1; | 2000 | boot->e820_entries = 1; |
@@ -2064,7 +2016,7 @@ int main(int argc, char *argv[]) | |||
2064 | 2016 | ||
2065 | /* We tell the kernel to initialize the Guest: this returns the open | 2017 | /* We tell the kernel to initialize the Guest: this returns the open |
2066 | * /dev/lguest file descriptor. */ | 2018 | * /dev/lguest file descriptor. */ |
2067 | lguest_fd = tell_kernel(pgdir, start); | 2019 | lguest_fd = tell_kernel(start); |
2068 | 2020 | ||
2069 | /* We clone off a thread, which wakes the Launcher whenever one of the | 2021 | /* We clone off a thread, which wakes the Launcher whenever one of the |
2070 | * input file descriptors needs attention. We call this the Waker, and | 2022 | * input file descriptors needs attention. We call this the Waker, and |
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index c13568b9351..0503936f101 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -50,6 +50,10 @@ struct kvm_vqconfig { | |||
50 | #define KVM_S390_VIRTIO_RESET 1 | 50 | #define KVM_S390_VIRTIO_RESET 1 |
51 | #define KVM_S390_VIRTIO_SET_STATUS 2 | 51 | #define KVM_S390_VIRTIO_SET_STATUS 2 |
52 | 52 | ||
53 | /* The alignment to use between consumer and producer parts of vring. | ||
54 | * This is pagesize for historical reasons. */ | ||
55 | #define KVM_S390_VIRTIO_RING_ALIGN 4096 | ||
56 | |||
53 | #ifdef __KERNEL__ | 57 | #ifdef __KERNEL__ |
54 | /* early virtio console setup */ | 58 | /* early virtio console setup */ |
55 | #ifdef CONFIG_S390_GUEST | 59 | #ifdef CONFIG_S390_GUEST |
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 5c7cef34c9e..10b9bd35a8f 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -30,21 +30,6 @@ ENTRY(lguest_entry) | |||
30 | movl $lguest_data - __PAGE_OFFSET, %edx | 30 | movl $lguest_data - __PAGE_OFFSET, %edx |
31 | int $LGUEST_TRAP_ENTRY | 31 | int $LGUEST_TRAP_ENTRY |
32 | 32 | ||
33 | /* The Host put the toplevel pagetable in lguest_data.pgdir. The movsl | ||
34 | * instruction uses %esi implicitly as the source for the copy we're | ||
35 | * about to do. */ | ||
36 | movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi | ||
37 | |||
38 | /* Copy first 32 entries of page directory to __PAGE_OFFSET entries. | ||
39 | * This means the first 128M of kernel memory will be mapped at | ||
40 | * PAGE_OFFSET where the kernel expects to run. This will get it far | ||
41 | * enough through boot to switch to its own pagetables. */ | ||
42 | movl $32, %ecx | ||
43 | movl %esi, %edi | ||
44 | addl $((__PAGE_OFFSET >> 22) * 4), %edi | ||
45 | rep | ||
46 | movsl | ||
47 | |||
48 | /* Set up the initial stack so we can run C code. */ | 33 | /* Set up the initial stack so we can run C code. */ |
49 | movl $(init_thread_union+THREAD_SIZE),%esp | 34 | movl $(init_thread_union+THREAD_SIZE),%esp |
50 | 35 | ||
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index f151592ecf7..5d34764c8a8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,7 +6,6 @@ | |||
6 | #include <linux/virtio_blk.h> | 6 | #include <linux/virtio_blk.h> |
7 | #include <linux/scatterlist.h> | 7 | #include <linux/scatterlist.h> |
8 | 8 | ||
9 | #define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS) | ||
10 | #define PART_BITS 4 | 9 | #define PART_BITS 4 |
11 | 10 | ||
12 | static int major, index; | 11 | static int major, index; |
@@ -26,8 +25,11 @@ struct virtio_blk | |||
26 | 25 | ||
27 | mempool_t *pool; | 26 | mempool_t *pool; |
28 | 27 | ||
28 | /* What host tells us, plus 2 for header & tailer. */ | ||
29 | unsigned int sg_elems; | ||
30 | |||
29 | /* Scatterlist: can be too big for stack. */ | 31 | /* Scatterlist: can be too big for stack. */ |
30 | struct scatterlist sg[VIRTIO_MAX_SG]; | 32 | struct scatterlist sg[/*sg_elems*/]; |
31 | }; | 33 | }; |
32 | 34 | ||
33 | struct virtblk_req | 35 | struct virtblk_req |
@@ -97,8 +99,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, | |||
97 | if (blk_barrier_rq(vbr->req)) | 99 | if (blk_barrier_rq(vbr->req)) |
98 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; | 100 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; |
99 | 101 | ||
100 | /* This init could be done at vblk creation time */ | ||
101 | sg_init_table(vblk->sg, VIRTIO_MAX_SG); | ||
102 | sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); | 102 | sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); |
103 | num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); | 103 | num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); |
104 | sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status)); | 104 | sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status)); |
@@ -130,7 +130,7 @@ static void do_virtblk_request(struct request_queue *q) | |||
130 | 130 | ||
131 | while ((req = elv_next_request(q)) != NULL) { | 131 | while ((req = elv_next_request(q)) != NULL) { |
132 | vblk = req->rq_disk->private_data; | 132 | vblk = req->rq_disk->private_data; |
133 | BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg)); | 133 | BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); |
134 | 134 | ||
135 | /* If this request fails, stop queue and wait for something to | 135 | /* If this request fails, stop queue and wait for something to |
136 | finish to restart it. */ | 136 | finish to restart it. */ |
@@ -196,12 +196,22 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
196 | int err; | 196 | int err; |
197 | u64 cap; | 197 | u64 cap; |
198 | u32 v; | 198 | u32 v; |
199 | u32 blk_size; | 199 | u32 blk_size, sg_elems; |
200 | 200 | ||
201 | if (index_to_minor(index) >= 1 << MINORBITS) | 201 | if (index_to_minor(index) >= 1 << MINORBITS) |
202 | return -ENOSPC; | 202 | return -ENOSPC; |
203 | 203 | ||
204 | vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); | 204 | /* We need to know how many segments before we allocate. */ |
205 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, | ||
206 | offsetof(struct virtio_blk_config, seg_max), | ||
207 | &sg_elems); | ||
208 | if (err) | ||
209 | sg_elems = 1; | ||
210 | |||
211 | /* We need an extra sg elements at head and tail. */ | ||
212 | sg_elems += 2; | ||
213 | vdev->priv = vblk = kmalloc(sizeof(*vblk) + | ||
214 | sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); | ||
205 | if (!vblk) { | 215 | if (!vblk) { |
206 | err = -ENOMEM; | 216 | err = -ENOMEM; |
207 | goto out; | 217 | goto out; |
@@ -210,6 +220,8 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
210 | INIT_LIST_HEAD(&vblk->reqs); | 220 | INIT_LIST_HEAD(&vblk->reqs); |
211 | spin_lock_init(&vblk->lock); | 221 | spin_lock_init(&vblk->lock); |
212 | vblk->vdev = vdev; | 222 | vblk->vdev = vdev; |
223 | vblk->sg_elems = sg_elems; | ||
224 | sg_init_table(vblk->sg, vblk->sg_elems); | ||
213 | 225 | ||
214 | /* We expect one virtqueue, for output. */ | 226 | /* We expect one virtqueue, for output. */ |
215 | vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); | 227 | vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); |
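
The allocation above uses the trailing flexible-array idiom so the scatterlist is sized at probe time rather than compiled in; a stripped-down sketch with a hypothetical structure (error paths and locking omitted):

#include <linux/slab.h>
#include <linux/scatterlist.h>

/* Hypothetical mirror of the pattern: the struct ends in sg[], and one
 * kmalloc() covers the fixed part plus seg_max + 2 scatterlist entries
 * (the extra two hold the request header and the status byte). */
struct blk_sketch {
	unsigned int sg_elems;
	struct scatterlist sg[];
};

static struct blk_sketch *blk_sketch_alloc(u32 seg_max)
{
	unsigned int elems = seg_max + 2;
	struct blk_sketch *b;

	b = kmalloc(sizeof(*b) + sizeof(b->sg[0]) * elems, GFP_KERNEL);
	if (!b)
		return NULL;
	b->sg_elems = elems;
	sg_init_table(b->sg, elems);
	return b;
}
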
@@ -279,6 +291,13 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
279 | } | 291 | } |
280 | set_capacity(vblk->disk, cap); | 292 | set_capacity(vblk->disk, cap); |
281 | 293 | ||
294 | /* We can handle whatever the host told us to handle. */ | ||
295 | blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2); | ||
296 | blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2); | ||
297 | |||
298 | /* No real sector limit. */ | ||
299 | blk_queue_max_sectors(vblk->disk->queue, -1U); | ||
300 | |||
282 | /* Host can optionally specify maximum segment size and number of | 301 | /* Host can optionally specify maximum segment size and number of |
283 | * segments. */ | 302 | * segments. */ |
284 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, | 303 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, |
@@ -286,12 +305,8 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
286 | &v); | 305 | &v); |
287 | if (!err) | 306 | if (!err) |
288 | blk_queue_max_segment_size(vblk->disk->queue, v); | 307 | blk_queue_max_segment_size(vblk->disk->queue, v); |
289 | 308 | else | |
290 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, | 309 | blk_queue_max_segment_size(vblk->disk->queue, -1U); |
291 | offsetof(struct virtio_blk_config, seg_max), | ||
292 | &v); | ||
293 | if (!err) | ||
294 | blk_queue_max_hw_segments(vblk->disk->queue, v); | ||
295 | 310 | ||
296 | /* Host can optionally specify the block size of the device */ | 311 | /* Host can optionally specify the block size of the device */ |
297 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, | 312 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fb57f67bb42..0587b66d6fc 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -695,6 +695,7 @@ void hvc_resize(struct hvc_struct *hp, struct winsize ws) | |||
695 | hp->ws = ws; | 695 | hp->ws = ws; |
696 | schedule_work(&hp->tty_resize); | 696 | schedule_work(&hp->tty_resize); |
697 | } | 697 | } |
698 | EXPORT_SYMBOL_GPL(hvc_resize); | ||
698 | 699 | ||
699 | /* | 700 | /* |
700 | * This kthread is either polling or interrupt driven. This is determined by | 701 | * This kthread is either polling or interrupt driven. This is determined by |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 3fb0d2c88ba..ff6f5a4b58f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -138,12 +138,33 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) | |||
138 | } | 138 | } |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * virtio console configuration. This supports: | ||
142 | * - console resize | ||
143 | */ | ||
144 | static void virtcons_apply_config(struct virtio_device *dev) | ||
145 | { | ||
146 | struct winsize ws; | ||
147 | |||
148 | if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) { | ||
149 | dev->config->get(dev, | ||
150 | offsetof(struct virtio_console_config, cols), | ||
151 | &ws.ws_col, sizeof(u16)); | ||
152 | dev->config->get(dev, | ||
153 | offsetof(struct virtio_console_config, rows), | ||
154 | &ws.ws_row, sizeof(u16)); | ||
155 | hvc_resize(hvc, ws); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | /* | ||
141 | * we support only one console, the hvc struct is a global var | 160 | * we support only one console, the hvc struct is a global var |
142 | * There is no need to do anything | 161 | * We set the configuration at this point, since we now have a tty |
143 | */ | 162 | */ |
144 | static int notifier_add_vio(struct hvc_struct *hp, int data) | 163 | static int notifier_add_vio(struct hvc_struct *hp, int data) |
145 | { | 164 | { |
146 | hp->irq_requested = 1; | 165 | hp->irq_requested = 1; |
166 | virtcons_apply_config(vdev); | ||
167 | |||
147 | return 0; | 168 | return 0; |
148 | } | 169 | } |
149 | 170 | ||
@@ -234,11 +255,18 @@ static struct virtio_device_id id_table[] = { | |||
234 | { 0 }, | 255 | { 0 }, |
235 | }; | 256 | }; |
236 | 257 | ||
258 | static unsigned int features[] = { | ||
259 | VIRTIO_CONSOLE_F_SIZE, | ||
260 | }; | ||
261 | |||
237 | static struct virtio_driver virtio_console = { | 262 | static struct virtio_driver virtio_console = { |
263 | .feature_table = features, | ||
264 | .feature_table_size = ARRAY_SIZE(features), | ||
238 | .driver.name = KBUILD_MODNAME, | 265 | .driver.name = KBUILD_MODNAME, |
239 | .driver.owner = THIS_MODULE, | 266 | .driver.owner = THIS_MODULE, |
240 | .id_table = id_table, | 267 | .id_table = id_table, |
241 | .probe = virtcons_probe, | 268 | .probe = virtcons_probe, |
269 | .config_changed = virtcons_apply_config, | ||
242 | }; | 270 | }; |
243 | 271 | ||
244 | static int __init init(void) | 272 | static int __init init(void) |
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 5faefeaf679..f2c641e0bdd 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -164,7 +164,7 @@ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt); | |||
164 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); | 164 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); |
165 | 165 | ||
166 | /* page_tables.c: */ | 166 | /* page_tables.c: */ |
167 | int init_guest_pagetable(struct lguest *lg, unsigned long pgtable); | 167 | int init_guest_pagetable(struct lguest *lg); |
168 | void free_guest_pagetable(struct lguest *lg); | 168 | void free_guest_pagetable(struct lguest *lg); |
169 | void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); | 169 | void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); |
170 | void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); | 170 | void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index a661bbdae3d..915da6b8c92 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -250,7 +250,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
250 | /* Figure out how many pages the ring will take, and map that memory */ | 250 | /* Figure out how many pages the ring will take, and map that memory */ |
251 | lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT, | 251 | lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT, |
252 | DIV_ROUND_UP(vring_size(lvq->config.num, | 252 | DIV_ROUND_UP(vring_size(lvq->config.num, |
253 | PAGE_SIZE), | 253 | LGUEST_VRING_ALIGN), |
254 | PAGE_SIZE)); | 254 | PAGE_SIZE)); |
255 | if (!lvq->pages) { | 255 | if (!lvq->pages) { |
256 | err = -ENOMEM; | 256 | err = -ENOMEM; |
@@ -259,8 +259,8 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
259 | 259 | ||
260 | /* OK, tell virtio_ring.c to set up a virtqueue now we know its size | 260 | /* OK, tell virtio_ring.c to set up a virtqueue now we know its size |
261 | * and we've got a pointer to its pages. */ | 261 | * and we've got a pointer to its pages. */ |
262 | vq = vring_new_virtqueue(lvq->config.num, vdev, lvq->pages, | 262 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, |
263 | lg_notify, callback); | 263 | vdev, lvq->pages, lg_notify, callback); |
264 | if (!vq) { | 264 | if (!vq) { |
265 | err = -ENOMEM; | 265 | err = -ENOMEM; |
266 | goto unmap; | 266 | goto unmap; |
@@ -272,7 +272,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
272 | * the interrupt as a source of randomness: it'd be nice to have that | 272 | * the interrupt as a source of randomness: it'd be nice to have that |
273 | * back.. */ | 273 | * back.. */ |
274 | err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, | 274 | err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, |
275 | vdev->dev.bus_id, vq); | 275 | dev_name(&vdev->dev), vq); |
276 | if (err) | 276 | if (err) |
277 | goto destroy_vring; | 277 | goto destroy_vring; |
278 | 278 | ||
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index e73a000473c..34bc017b8b3 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -146,7 +146,7 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
148 | 148 | ||
149 | /*L:020 The initialization write supplies 4 pointer sized (32 or 64 bit) | 149 | /*L:020 The initialization write supplies 3 pointer sized (32 or 64 bit) |
150 | * values (in addition to the LHREQ_INITIALIZE value). These are: | 150 | * values (in addition to the LHREQ_INITIALIZE value). These are: |
151 | * | 151 | * |
152 | * base: The start of the Guest-physical memory inside the Launcher memory. | 152 | * base: The start of the Guest-physical memory inside the Launcher memory. |
@@ -155,9 +155,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
155 | * allowed to access. The Guest memory lives inside the Launcher, so it sets | 155 | * allowed to access. The Guest memory lives inside the Launcher, so it sets |
156 | * this to ensure the Guest can only reach its own memory. | 156 | * this to ensure the Guest can only reach its own memory. |
157 | * | 157 | * |
158 | * pgdir: The (Guest-physical) address of the top of the initial Guest | ||
159 | * pagetables (which are set up by the Launcher). | ||
160 | * | ||
161 | * start: The first instruction to execute ("eip" in x86-speak). | 158 | * start: The first instruction to execute ("eip" in x86-speak). |
162 | */ | 159 | */ |
163 | static int initialize(struct file *file, const unsigned long __user *input) | 160 | static int initialize(struct file *file, const unsigned long __user *input) |
@@ -166,7 +163,7 @@ static int initialize(struct file *file, const unsigned long __user *input) | |||
166 | * Guest. */ | 163 | * Guest. */ |
167 | struct lguest *lg; | 164 | struct lguest *lg; |
168 | int err; | 165 | int err; |
169 | unsigned long args[4]; | 166 | unsigned long args[3]; |
170 | 167 | ||
171 | /* We grab the Big Lguest lock, which protects against multiple | 168 | /* We grab the Big Lguest lock, which protects against multiple |
172 | * simultaneous initializations. */ | 169 | * simultaneous initializations. */ |
@@ -192,14 +189,14 @@ static int initialize(struct file *file, const unsigned long __user *input) | |||
192 | lg->mem_base = (void __user *)args[0]; | 189 | lg->mem_base = (void __user *)args[0]; |
193 | lg->pfn_limit = args[1]; | 190 | lg->pfn_limit = args[1]; |
194 | 191 | ||
195 | /* This is the first cpu (cpu 0) and it will start booting at args[3] */ | 192 | /* This is the first cpu (cpu 0) and it will start booting at args[2] */ |
196 | err = lg_cpu_start(&lg->cpus[0], 0, args[3]); | 193 | err = lg_cpu_start(&lg->cpus[0], 0, args[2]); |
197 | if (err) | 194 | if (err) |
198 | goto release_guest; | 195 | goto release_guest; |
199 | 196 | ||
200 | /* Initialize the Guest's shadow page tables, using the toplevel | 197 | /* Initialize the Guest's shadow page tables, using the toplevel |
201 | * address the Launcher gave us. This allocates memory, so can fail. */ | 198 | * address the Launcher gave us. This allocates memory, so can fail. */ |
202 | err = init_guest_pagetable(lg, args[2]); | 199 | err = init_guest_pagetable(lg); |
203 | if (err) | 200 | if (err) |
204 | goto free_regs; | 201 | goto free_regs; |
205 | 202 | ||
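
From the Launcher's side this is just a four-word write to /dev/lguest (the request word plus the three values above); a condensed userspace sketch, with the actual base, limit and start values left to the caller:

#include <fcntl.h>
#include <unistd.h>
#include <linux/lguest_launcher.h>

/* Sketch of the Launcher's side of LHREQ_INITIALIZE (see tell_kernel()
 * in Documentation/lguest/lguest.c): returns the /dev/lguest fd that
 * controls the Guest, or -1 on error. */
static int init_guest(void *guest_base, unsigned long guest_limit,
		      unsigned long start, long pagesize)
{
	unsigned long args[] = { LHREQ_INITIALIZE,
				 (unsigned long)guest_base,
				 guest_limit / pagesize, start };
	int fd = open("/dev/lguest", O_RDWR);

	if (fd < 0)
		return -1;
	if (write(fd, args, sizeof(args)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
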
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 81d0c605344..576a8318221 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
15 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
17 | #include <asm/bootparam.h> | ||
17 | #include "lg.h" | 18 | #include "lg.h" |
18 | 19 | ||
19 | /*M:008 We hold reference to pages, which prevents them from being swapped. | 20 | /*M:008 We hold reference to pages, which prevents them from being swapped. |
@@ -581,15 +582,82 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx) | |||
581 | release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); | 582 | release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); |
582 | } | 583 | } |
583 | 584 | ||
585 | /* Once we know how much memory we have we can construct simple identity | ||
586 | * (which set virtual == physical) and linear mappings | ||
587 | * which will get the Guest far enough into the boot to create its own. | ||
588 | * | ||
589 | * We lay them out of the way, just below the initrd (which is why we need to | ||
590 | * know its size here). */ | ||
591 | static unsigned long setup_pagetables(struct lguest *lg, | ||
592 | unsigned long mem, | ||
593 | unsigned long initrd_size) | ||
594 | { | ||
595 | pgd_t __user *pgdir; | ||
596 | pte_t __user *linear; | ||
597 | unsigned int mapped_pages, i, linear_pages, phys_linear; | ||
598 | unsigned long mem_base = (unsigned long)lg->mem_base; | ||
599 | |||
600 | /* We have mapped_pages frames to map, so we need | ||
601 | * linear_pages page tables to map them. */ | ||
602 | mapped_pages = mem / PAGE_SIZE; | ||
603 | linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; | ||
604 | |||
605 | /* We put the toplevel page directory page at the top of memory. */ | ||
606 | pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE); | ||
607 | |||
608 | /* Now we use the next linear_pages pages as pte pages */ | ||
609 | linear = (void *)pgdir - linear_pages * PAGE_SIZE; | ||
610 | |||
611 | /* Linear mapping is easy: put every page's address into the | ||
612 | * mapping in order. */ | ||
613 | for (i = 0; i < mapped_pages; i++) { | ||
614 | pte_t pte; | ||
615 | pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); | ||
616 | if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0) | ||
617 | return -EFAULT; | ||
618 | } | ||
619 | |||
620 | /* The top level points to the linear page table pages above. | ||
621 | * We setup the identity and linear mappings here. */ | ||
622 | phys_linear = (unsigned long)linear - mem_base; | ||
623 | for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { | ||
624 | pgd_t pgd; | ||
625 | pgd = __pgd((phys_linear + i * sizeof(pte_t)) | | ||
626 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); | ||
627 | |||
628 | if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) | ||
629 | || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) | ||
630 | + i / PTRS_PER_PTE], | ||
631 | &pgd, sizeof(pgd))) | ||
632 | return -EFAULT; | ||
633 | } | ||
634 | |||
635 | /* We return the top level (guest-physical) address: remember where | ||
636 | * this is. */ | ||
637 | return (unsigned long)pgdir - mem_base; | ||
638 | } | ||
639 | |||
584 | /*H:500 (vii) Setting up the page tables initially. | 640 | /*H:500 (vii) Setting up the page tables initially. |
585 | * | 641 | * |
586 | * When a Guest is first created, the Launcher tells us where the toplevel of | 642 | * When a Guest is first created, the Launcher tells us where the toplevel of |
587 | * its first page table is. We set some things up here: */ | 643 | * its first page table is. We set some things up here: */ |
588 | int init_guest_pagetable(struct lguest *lg, unsigned long pgtable) | 644 | int init_guest_pagetable(struct lguest *lg) |
589 | { | 645 | { |
646 | u64 mem; | ||
647 | u32 initrd_size; | ||
648 | struct boot_params __user *boot = (struct boot_params *)lg->mem_base; | ||
649 | |||
650 | /* Get the Guest memory size and the ramdisk size from the boot header | ||
651 | * located at lg->mem_base (Guest address 0). */ | ||
652 | if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) | ||
653 | || get_user(initrd_size, &boot->hdr.ramdisk_size)) | ||
654 | return -EFAULT; | ||
655 | |||
590 | /* We start on the first shadow page table, and give it a blank PGD | 656 | /* We start on the first shadow page table, and give it a blank PGD |
591 | * page. */ | 657 | * page. */ |
592 | lg->pgdirs[0].gpgdir = pgtable; | 658 | lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); |
659 | if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) | ||
660 | return lg->pgdirs[0].gpgdir; | ||
593 | lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); | 661 | lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); |
594 | if (!lg->pgdirs[0].pgdir) | 662 | if (!lg->pgdirs[0].pgdir) |
595 | return -ENOMEM; | 663 | return -ENOMEM; |
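
To put numbers on the layout above: a worked sketch for a hypothetical 64 MB Guest, assuming 4 KB pages and 1024 PTEs per page (i386 without PAE):

/* Illustration only -- the cost of the initial tables for mem = 64 MB.
 * The PGD page sits at the top of memory (just below the initrd) and
 * the PTE pages sit directly below it; every PGD slot is written twice,
 * once at index i/PTRS_PER_PTE (identity) and once at
 * pgd_index(PAGE_OFFSET) + i/PTRS_PER_PTE (linear). */
static unsigned long initial_pagetable_bytes(unsigned long mem)
{
	unsigned long mapped_pages = mem / 4096;                   /* 16384 */
	unsigned long linear_pages = (mapped_pages + 1023) / 1024; /*    16 */

	return (1 + linear_pages) * 4096;                          /* 69632 */
}
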
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 3d442444c61..28c90b89f2b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -188,11 +188,13 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | |||
188 | config = kvm_vq_config(kdev->desc)+index; | 188 | config = kvm_vq_config(kdev->desc)+index; |
189 | 189 | ||
190 | err = vmem_add_mapping(config->address, | 190 | err = vmem_add_mapping(config->address, |
191 | vring_size(config->num, PAGE_SIZE)); | 191 | vring_size(config->num, |
192 | KVM_S390_VIRTIO_RING_ALIGN)); | ||
192 | if (err) | 193 | if (err) |
193 | goto out; | 194 | goto out; |
194 | 195 | ||
195 | vq = vring_new_virtqueue(config->num, vdev, (void *) config->address, | 196 | vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, |
197 | vdev, (void *) config->address, | ||
196 | kvm_notify, callback); | 198 | kvm_notify, callback); |
197 | if (!vq) { | 199 | if (!vq) { |
198 | err = -ENOMEM; | 200 | err = -ENOMEM; |
@@ -209,7 +211,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | |||
209 | return vq; | 211 | return vq; |
210 | unmap: | 212 | unmap: |
211 | vmem_remove_mapping(config->address, | 213 | vmem_remove_mapping(config->address, |
212 | vring_size(config->num, PAGE_SIZE)); | 214 | vring_size(config->num, |
215 | KVM_S390_VIRTIO_RING_ALIGN)); | ||
213 | out: | 216 | out: |
214 | return ERR_PTR(err); | 217 | return ERR_PTR(err); |
215 | } | 218 | } |
@@ -220,7 +223,8 @@ static void kvm_del_vq(struct virtqueue *vq) | |||
220 | 223 | ||
221 | vring_del_virtqueue(vq); | 224 | vring_del_virtqueue(vq); |
222 | vmem_remove_mapping(config->address, | 225 | vmem_remove_mapping(config->address, |
223 | vring_size(config->num, PAGE_SIZE)); | 226 | vring_size(config->num, |
227 | KVM_S390_VIRTIO_RING_ALIGN)); | ||
224 | } | 228 | } |
225 | 229 | ||
226 | /* | 230 | /* |
@@ -295,13 +299,29 @@ static void scan_devices(void) | |||
295 | */ | 299 | */ |
296 | static void kvm_extint_handler(u16 code) | 300 | static void kvm_extint_handler(u16 code) |
297 | { | 301 | { |
298 | void *data = (void *) *(long *) __LC_PFAULT_INTPARM; | 302 | struct virtqueue *vq; |
299 | u16 subcode = S390_lowcore.cpu_addr; | 303 | u16 subcode; |
304 | int config_changed; | ||
300 | 305 | ||
306 | subcode = S390_lowcore.cpu_addr; | ||
301 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) | 307 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) |
302 | return; | 308 | return; |
303 | 309 | ||
304 | vring_interrupt(0, data); | 310 | /* The LSB might be overloaded, we have to mask it */ |
311 | vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL); | ||
312 | |||
313 | /* We use the LSB of extparam, to decide, if this interrupt is a config | ||
314 | * change or a "standard" interrupt */ | ||
315 | config_changed = (*(int *) __LC_EXT_PARAMS & 1); | ||
316 | |||
317 | if (config_changed) { | ||
318 | struct virtio_driver *drv; | ||
319 | drv = container_of(vq->vdev->dev.driver, | ||
320 | struct virtio_driver, driver); | ||
321 | if (drv->config_changed) | ||
322 | drv->config_changed(vq->vdev); | ||
323 | } else | ||
324 | vring_interrupt(0, vq); | ||
305 | } | 325 | } |
306 | 326 | ||
307 | /* | 327 | /* |
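
The handler above leans on two single-bit conventions: the virtqueue token arrives with its low bit possibly set, and bit 0 of the external-interrupt parameter says whether this is a configuration change. A tiny decode-only sketch of that part (names here are hypothetical):

#include <linux/virtio.h>

/* Sketch of the decode only: mask the overloaded low bit off the token
 * and read the config-change flag from the extparam's low bit. */
static void decode_virtio_extint(unsigned long intparm, unsigned int extparam,
				 struct virtqueue **vq, int *config_changed)
{
	*vq = (struct virtqueue *)(intparm & ~1UL);
	*config_changed = extparam & 1;
}
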
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5b78fd0aff0..018c070a357 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -176,7 +176,7 @@ int register_virtio_device(struct virtio_device *dev) | |||
176 | 176 | ||
177 | /* Assign a unique device index and hence name. */ | 177 | /* Assign a unique device index and hence name. */ |
178 | dev->index = dev_index++; | 178 | dev->index = dev_index++; |
179 | sprintf(dev->dev.bus_id, "virtio%u", dev->index); | 179 | dev_set_name(&dev->dev, "virtio%u", dev->index); |
180 | 180 | ||
181 | /* We always start by resetting the device, in case a previous | 181 | /* We always start by resetting the device, in case a previous |
182 | * driver messed it up. This also tests that code path a little. */ | 182 | * driver messed it up. This also tests that code path a little. */ |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 62eab43152d..59268266b79 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -56,6 +56,15 @@ static struct virtio_device_id id_table[] = { | |||
56 | { 0 }, | 56 | { 0 }, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static u32 page_to_balloon_pfn(struct page *page) | ||
60 | { | ||
61 | unsigned long pfn = page_to_pfn(page); | ||
62 | |||
63 | BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT); | ||
64 | /* Convert pfn from Linux page size to balloon page size. */ | ||
65 | return pfn >> (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT); | ||
66 | } | ||
67 | |||
59 | static void balloon_ack(struct virtqueue *vq) | 68 | static void balloon_ack(struct virtqueue *vq) |
60 | { | 69 | { |
61 | struct virtio_balloon *vb; | 70 | struct virtio_balloon *vb; |
@@ -99,7 +108,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) | |||
99 | msleep(200); | 108 | msleep(200); |
100 | break; | 109 | break; |
101 | } | 110 | } |
102 | vb->pfns[vb->num_pfns] = page_to_pfn(page); | 111 | vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page); |
103 | totalram_pages--; | 112 | totalram_pages--; |
104 | vb->num_pages++; | 113 | vb->num_pages++; |
105 | list_add(&page->lru, &vb->pages); | 114 | list_add(&page->lru, &vb->pages); |
@@ -132,7 +141,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) | |||
132 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { | 141 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { |
133 | page = list_first_entry(&vb->pages, struct page, lru); | 142 | page = list_first_entry(&vb->pages, struct page, lru); |
134 | list_del(&page->lru); | 143 | list_del(&page->lru); |
135 | vb->pfns[vb->num_pfns] = page_to_pfn(page); | 144 | vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page); |
136 | vb->num_pages--; | 145 | vb->num_pages--; |
137 | } | 146 | } |
138 | 147 | ||
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c7dc37c7cce..265fdf2d127 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -75,7 +75,7 @@ MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); | |||
75 | * would make more sense for virtio to not insist on having it's own device. */ | 75 | * would make more sense for virtio to not insist on having it's own device. */ |
76 | static struct device virtio_pci_root = { | 76 | static struct device virtio_pci_root = { |
77 | .parent = NULL, | 77 | .parent = NULL, |
78 | .bus_id = "virtio-pci", | 78 | .init_name = "virtio-pci", |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* Convert a generic virtio device to our structure */ | 81 | /* Convert a generic virtio device to our structure */ |
@@ -216,7 +216,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
216 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 216 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
217 | struct virtio_pci_vq_info *info; | 217 | struct virtio_pci_vq_info *info; |
218 | struct virtqueue *vq; | 218 | struct virtqueue *vq; |
219 | unsigned long flags; | 219 | unsigned long flags, size; |
220 | u16 num; | 220 | u16 num; |
221 | int err; | 221 | int err; |
222 | 222 | ||
@@ -237,19 +237,20 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
237 | info->queue_index = index; | 237 | info->queue_index = index; |
238 | info->num = num; | 238 | info->num = num; |
239 | 239 | ||
240 | info->queue = kzalloc(PAGE_ALIGN(vring_size(num,PAGE_SIZE)), GFP_KERNEL); | 240 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); |
241 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); | ||
241 | if (info->queue == NULL) { | 242 | if (info->queue == NULL) { |
242 | err = -ENOMEM; | 243 | err = -ENOMEM; |
243 | goto out_info; | 244 | goto out_info; |
244 | } | 245 | } |
245 | 246 | ||
246 | /* activate the queue */ | 247 | /* activate the queue */ |
247 | iowrite32(virt_to_phys(info->queue) >> PAGE_SHIFT, | 248 | iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, |
248 | vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 249 | vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
249 | 250 | ||
250 | /* create the vring */ | 251 | /* create the vring */ |
251 | vq = vring_new_virtqueue(info->num, vdev, info->queue, | 252 | vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, |
252 | vp_notify, callback); | 253 | vdev, info->queue, vp_notify, callback); |
253 | if (!vq) { | 254 | if (!vq) { |
254 | err = -ENOMEM; | 255 | err = -ENOMEM; |
255 | goto out_activate_queue; | 256 | goto out_activate_queue; |
@@ -266,7 +267,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
266 | 267 | ||
267 | out_activate_queue: | 268 | out_activate_queue: |
268 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 269 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
269 | kfree(info->queue); | 270 | free_pages_exact(info->queue, size); |
270 | out_info: | 271 | out_info: |
271 | kfree(info); | 272 | kfree(info); |
272 | return ERR_PTR(err); | 273 | return ERR_PTR(err); |
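
A short sketch of the arithmetic in vp_find_vq() above, for a hypothetical 256-entry queue (the numbers are worked out in the comments):

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

/* Illustration: sizing and the value written to VIRTIO_PCI_QUEUE_PFN.
 * For num = 256, vring_size(256, VIRTIO_PCI_VRING_ALIGN) is 10244 bytes,
 * PAGE_ALIGN rounds that to 12288, so alloc_pages_exact() hands back
 * three pages.  The device is always told the address in 4 KB frames,
 * independent of the host PAGE_SIZE -- hence VIRTIO_PCI_QUEUE_ADDR_SHIFT. */
static void queue_setup_sketch(void *queue, unsigned int num)
{
	unsigned long size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	unsigned long pfn  = virt_to_phys(queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	pr_debug("queue: %lu bytes, told device frame %lu\n", size, pfn);
}
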
@@ -277,7 +278,7 @@ static void vp_del_vq(struct virtqueue *vq) | |||
277 | { | 278 | { |
278 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 279 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
279 | struct virtio_pci_vq_info *info = vq->priv; | 280 | struct virtio_pci_vq_info *info = vq->priv; |
280 | unsigned long flags; | 281 | unsigned long flags, size; |
281 | 282 | ||
282 | spin_lock_irqsave(&vp_dev->lock, flags); | 283 | spin_lock_irqsave(&vp_dev->lock, flags); |
283 | list_del(&info->node); | 284 | list_del(&info->node); |
@@ -289,7 +290,8 @@ static void vp_del_vq(struct virtqueue *vq) | |||
289 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | 290 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); |
290 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 291 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
291 | 292 | ||
292 | kfree(info->queue); | 293 | size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); |
294 | free_pages_exact(info->queue, size); | ||
293 | kfree(info); | 295 | kfree(info); |
294 | } | 296 | } |
295 | 297 | ||
@@ -305,6 +307,20 @@ static struct virtio_config_ops virtio_pci_config_ops = { | |||
305 | .finalize_features = vp_finalize_features, | 307 | .finalize_features = vp_finalize_features, |
306 | }; | 308 | }; |
307 | 309 | ||
310 | static void virtio_pci_release_dev(struct device *_d) | ||
311 | { | ||
312 | struct virtio_device *dev = container_of(_d, struct virtio_device, dev); | ||
313 | struct virtio_pci_device *vp_dev = to_vp_device(dev); | ||
314 | struct pci_dev *pci_dev = vp_dev->pci_dev; | ||
315 | |||
316 | free_irq(pci_dev->irq, vp_dev); | ||
317 | pci_set_drvdata(pci_dev, NULL); | ||
318 | pci_iounmap(pci_dev, vp_dev->ioaddr); | ||
319 | pci_release_regions(pci_dev); | ||
320 | pci_disable_device(pci_dev); | ||
321 | kfree(vp_dev); | ||
322 | } | ||
323 | |||
308 | /* the PCI probing function */ | 324 | /* the PCI probing function */ |
309 | static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, | 325 | static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, |
310 | const struct pci_device_id *id) | 326 | const struct pci_device_id *id) |
@@ -328,6 +344,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, | |||
328 | return -ENOMEM; | 344 | return -ENOMEM; |
329 | 345 | ||
330 | vp_dev->vdev.dev.parent = &virtio_pci_root; | 346 | vp_dev->vdev.dev.parent = &virtio_pci_root; |
347 | vp_dev->vdev.dev.release = virtio_pci_release_dev; | ||
331 | vp_dev->vdev.config = &virtio_pci_config_ops; | 348 | vp_dev->vdev.config = &virtio_pci_config_ops; |
332 | vp_dev->pci_dev = pci_dev; | 349 | vp_dev->pci_dev = pci_dev; |
333 | INIT_LIST_HEAD(&vp_dev->virtqueues); | 350 | INIT_LIST_HEAD(&vp_dev->virtqueues); |
@@ -357,7 +374,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, | |||
357 | 374 | ||
358 | /* register a handler for the queue with the PCI device's interrupt */ | 375 | /* register a handler for the queue with the PCI device's interrupt */ |
359 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, | 376 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, |
360 | vp_dev->vdev.dev.bus_id, vp_dev); | 377 | dev_name(&vp_dev->vdev.dev), vp_dev); |
361 | if (err) | 378 | if (err) |
362 | goto out_set_drvdata; | 379 | goto out_set_drvdata; |
363 | 380 | ||
@@ -387,12 +404,6 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) | |||
387 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | 404 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); |
388 | 405 | ||
389 | unregister_virtio_device(&vp_dev->vdev); | 406 | unregister_virtio_device(&vp_dev->vdev); |
390 | free_irq(pci_dev->irq, vp_dev); | ||
391 | pci_set_drvdata(pci_dev, NULL); | ||
392 | pci_iounmap(pci_dev, vp_dev->ioaddr); | ||
393 | pci_release_regions(pci_dev); | ||
394 | pci_disable_device(pci_dev); | ||
395 | kfree(vp_dev); | ||
396 | } | 407 | } |
397 | 408 | ||
398 | #ifdef CONFIG_PM | 409 | #ifdef CONFIG_PM |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6eb5303fed1..5777196bf6c 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -274,6 +274,7 @@ static struct virtqueue_ops vring_vq_ops = { | |||
274 | }; | 274 | }; |
275 | 275 | ||
276 | struct virtqueue *vring_new_virtqueue(unsigned int num, | 276 | struct virtqueue *vring_new_virtqueue(unsigned int num, |
277 | unsigned int vring_align, | ||
277 | struct virtio_device *vdev, | 278 | struct virtio_device *vdev, |
278 | void *pages, | 279 | void *pages, |
279 | void (*notify)(struct virtqueue *), | 280 | void (*notify)(struct virtqueue *), |
@@ -292,7 +293,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
292 | if (!vq) | 293 | if (!vq) |
293 | return NULL; | 294 | return NULL; |
294 | 295 | ||
295 | vring_init(&vq->vring, num, pages, PAGE_SIZE); | 296 | vring_init(&vq->vring, num, pages, vring_align); |
296 | vq->vq.callback = callback; | 297 | vq->vq.callback = callback; |
297 | vq->vq.vdev = vdev; | 298 | vq->vq.vdev = vdev; |
298 | vq->vq.vq_ops = &vring_vq_ops; | 299 | vq->vq.vq_ops = &vring_vq_ops; |
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index e7217dc58f3..a53407a4165 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -54,9 +54,13 @@ struct lguest_vqconfig { | |||
54 | /* Write command first word is a request. */ | 54 | /* Write command first word is a request. */ |
55 | enum lguest_req | 55 | enum lguest_req |
56 | { | 56 | { |
57 | LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */ | 57 | LHREQ_INITIALIZE, /* + base, pfnlimit, start */ |
58 | LHREQ_GETDMA, /* No longer used */ | 58 | LHREQ_GETDMA, /* No longer used */ |
59 | LHREQ_IRQ, /* + irq */ | 59 | LHREQ_IRQ, /* + irq */ |
60 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ | 60 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ |
61 | }; | 61 | }; |
62 | |||
63 | /* The alignment to use between consumer and producer parts of vring. | ||
64 | * x86 pagesize for historical reasons. */ | ||
65 | #define LGUEST_VRING_ALIGN 4096 | ||
62 | #endif /* _LINUX_LGUEST_LAUNCHER */ | 66 | #endif /* _LINUX_LGUEST_LAUNCHER */ |
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index c30c7bfbf39..8726ff77763 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -10,6 +10,9 @@ | |||
10 | /* The feature bitmap for virtio balloon */ | 10 | /* The feature bitmap for virtio balloon */ |
11 | #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ | 11 | #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ |
12 | 12 | ||
13 | /* Size of a PFN in the balloon interface. */ | ||
14 | #define VIRTIO_BALLOON_PFN_SHIFT 12 | ||
15 | |||
13 | struct virtio_balloon_config | 16 | struct virtio_balloon_config |
14 | { | 17 | { |
15 | /* Number of pages host wants Guest to give up. */ | 18 | /* Number of pages host wants Guest to give up. */ |
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index 19a0da0dba4..7615ffcdd55 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -7,6 +7,17 @@ | |||
7 | /* The ID for virtio console */ | 7 | /* The ID for virtio console */ |
8 | #define VIRTIO_ID_CONSOLE 3 | 8 | #define VIRTIO_ID_CONSOLE 3 |
9 | 9 | ||
10 | /* Feature bits */ | ||
11 | #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ | ||
12 | |||
13 | struct virtio_console_config { | ||
14 | /* colums of the screens */ | ||
15 | __u16 cols; | ||
16 | /* rows of the screens */ | ||
17 | __u16 rows; | ||
18 | } __attribute__((packed)); | ||
19 | |||
20 | |||
10 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
11 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); | 22 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); |
12 | #endif /* __KERNEL__ */ | 23 | #endif /* __KERNEL__ */ |
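
Since both sides index into this structure by offset, the packed attribute is what pins the layout to four bytes; a hypothetical compile-time check (not part of the patch) makes the agreed offsets explicit:

#include <linux/kernel.h>
#include <linux/virtio_console.h>

/* Hypothetical sanity check: cols at offset 0, rows at offset 2 --
 * exactly the offsets the console driver reads via config->get(). */
static inline void check_console_config_layout(void)
{
	BUILD_BUG_ON(offsetof(struct virtio_console_config, cols) != 0);
	BUILD_BUG_ON(offsetof(struct virtio_console_config, rows) != 2);
	BUILD_BUG_ON(sizeof(struct virtio_console_config) != 4);
}
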
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index cdef3574293..cd0fd5d181a 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -53,4 +53,12 @@ | |||
53 | 53 | ||
54 | /* Virtio ABI version, this must match exactly */ | 54 | /* Virtio ABI version, this must match exactly */ |
55 | #define VIRTIO_PCI_ABI_VERSION 0 | 55 | #define VIRTIO_PCI_ABI_VERSION 0 |
56 | |||
57 | /* How many bits to shift physical queue address written to QUEUE_PFN. | ||
58 | * 12 is historical, and due to x86 page size. */ | ||
59 | #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 | ||
60 | |||
61 | /* The alignment to use between consumer and producer parts of vring. | ||
62 | * x86 pagesize again. */ | ||
63 | #define VIRTIO_PCI_VRING_ALIGN 4096 | ||
56 | #endif | 64 | #endif |
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index c4a598fb382..71e03722fb5 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -83,7 +83,7 @@ struct vring { | |||
83 | * __u16 avail_idx; | 83 | * __u16 avail_idx; |
84 | * __u16 available[num]; | 84 | * __u16 available[num]; |
85 | * | 85 | * |
86 | * // Padding to the next page boundary. | 86 | * // Padding to the next align boundary. |
87 | * char pad[]; | 87 | * char pad[]; |
88 | * | 88 | * |
89 | * // A ring of used descriptor heads with free-running index. | 89 | * // A ring of used descriptor heads with free-running index. |
@@ -93,19 +93,19 @@ struct vring { | |||
93 | * }; | 93 | * }; |
94 | */ | 94 | */ |
95 | static inline void vring_init(struct vring *vr, unsigned int num, void *p, | 95 | static inline void vring_init(struct vring *vr, unsigned int num, void *p, |
96 | unsigned long pagesize) | 96 | unsigned long align) |
97 | { | 97 | { |
98 | vr->num = num; | 98 | vr->num = num; |
99 | vr->desc = p; | 99 | vr->desc = p; |
100 | vr->avail = p + num*sizeof(struct vring_desc); | 100 | vr->avail = p + num*sizeof(struct vring_desc); |
101 | vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1) | 101 | vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1) |
102 | & ~(pagesize - 1)); | 102 | & ~(align - 1)); |
103 | } | 103 | } |
104 | 104 | ||
105 | static inline unsigned vring_size(unsigned int num, unsigned long pagesize) | 105 | static inline unsigned vring_size(unsigned int num, unsigned long align) |
106 | { | 106 | { |
107 | return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) | 107 | return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) |
108 | + pagesize - 1) & ~(pagesize - 1)) | 108 | + align - 1) & ~(align - 1)) |
109 | + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; | 109 | + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; |
110 | } | 110 | } |
111 | 111 | ||
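
To make the new align parameter concrete, here is the layout it produces for a hypothetical 128-entry ring (pure arithmetic from the two helpers above):

#include <linux/virtio_ring.h>

/* Illustration: where the used ring lands for num = 128.
 * desc  = 128 * 16 bytes    = 2048
 * avail = 2*2 + 128*2 bytes =  260   (ends at offset 2308)
 * used  starts at the next align boundary:
 *   align = 4096 -> offset 4096, vring_size() = 5124 bytes
 *   align = 2    -> offset 2308, vring_size() = 3336 bytes
 * which is exactly the saving a non-page-aligned transport can choose. */
static unsigned long used_ring_offset(unsigned int num, unsigned long align)
{
	unsigned long avail_end = num * sizeof(struct vring_desc)
				  + sizeof(__u16) * (2 + num);

	return (avail_end + align - 1) & ~(align - 1);
}
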
@@ -115,6 +115,7 @@ struct virtio_device; | |||
115 | struct virtqueue; | 115 | struct virtqueue; |
116 | 116 | ||
117 | struct virtqueue *vring_new_virtqueue(unsigned int num, | 117 | struct virtqueue *vring_new_virtqueue(unsigned int num, |
118 | unsigned int vring_align, | ||
118 | struct virtio_device *vdev, | 119 | struct virtio_device *vdev, |
119 | void *pages, | 120 | void *pages, |
120 | void (*notify)(struct virtqueue *vq), | 121 | void (*notify)(struct virtqueue *vq), |