author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree		644b88f8a71896307d71438e9b3af49126ffb22b /drivers/virtio
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master	(archived-private-master)
Diffstat (limited to 'drivers/virtio')
-rw-r--r--	drivers/virtio/virtio_balloon.c	119
-rw-r--r--	drivers/virtio/virtio_pci.c	7
-rw-r--r--	drivers/virtio/virtio_ring.c	60
3 files changed, 165 insertions(+), 21 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9dd588042880..bfec7c29486d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -24,11 +24,12 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 
 struct virtio_balloon
 {
 	struct virtio_device *vdev;
-	struct virtqueue *inflate_vq, *deflate_vq;
+	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
 
 	/* Where the ballooning thread waits for config to change. */
 	wait_queue_head_t config_change;
@@ -49,6 +50,10 @@ struct virtio_balloon
 	/* The array of pfns we tell the Host about. */
 	unsigned int num_pfns;
 	u32 pfns[256];
+
+	/* Memory statistics */
+	int need_stats_update;
+	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
 };
 
 static struct virtio_device_id id_table[] = {
@@ -98,7 +103,8 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
 	num = min(num, ARRAY_SIZE(vb->pfns));
 
 	for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
-		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY);
+		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
+					__GFP_NOMEMALLOC | __GFP_NOWARN);
 		if (!page) {
 			if (printk_ratelimit())
 				dev_printk(KERN_INFO, &vb->vdev->dev,
@@ -154,6 +160,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
 	}
 }
 
+static inline void update_stat(struct virtio_balloon *vb, int idx,
+			       u16 tag, u64 val)
+{
+	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
+	vb->stats[idx].tag = tag;
+	vb->stats[idx].val = val;
+}
+
+#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
+
+static void update_balloon_stats(struct virtio_balloon *vb)
+{
+	unsigned long events[NR_VM_EVENT_ITEMS];
+	struct sysinfo i;
+	int idx = 0;
+
+	all_vm_events(events);
+	si_meminfo(&i);
+
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+				pages_to_bytes(events[PSWPIN]));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+				pages_to_bytes(events[PSWPOUT]));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+				pages_to_bytes(i.freeram));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+				pages_to_bytes(i.totalram));
+}
+
+/*
+ * While most virtqueues communicate guest-initiated requests to the hypervisor,
+ * the stats queue operates in reverse.  The driver initializes the virtqueue
+ * with a single buffer.  From that point forward, all conversations consist of
+ * a hypervisor request (a call to this function) which directs us to refill
+ * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
+ * we notify our kthread which does the actual work via stats_handle_request().
+ */
+static void stats_request(struct virtqueue *vq)
+{
+	struct virtio_balloon *vb;
+	unsigned int len;
+
+	vb = vq->vq_ops->get_buf(vq, &len);
+	if (!vb)
+		return;
+	vb->need_stats_update = 1;
+	wake_up(&vb->config_change);
+}
+
+static void stats_handle_request(struct virtio_balloon *vb)
+{
+	struct virtqueue *vq;
+	struct scatterlist sg;
+
+	vb->need_stats_update = 0;
+	update_balloon_stats(vb);
+
+	vq = vb->stats_vq;
+	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+		BUG();
+	vq->vq_ops->kick(vq);
+}
+
 static void virtballoon_changed(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
@@ -190,8 +262,11 @@ static int balloon(void *_vballoon)
 		try_to_freeze();
 		wait_event_interruptible(vb->config_change,
 					 (diff = towards_target(vb)) != 0
+					 || vb->need_stats_update
 					 || kthread_should_stop()
 					 || freezing(current));
+		if (vb->need_stats_update)
+			stats_handle_request(vb);
 		if (diff > 0)
 			fill_balloon(vb, diff);
 		else if (diff < 0)
@@ -204,10 +279,10 @@ static int balloon(void *_vballoon)
 static int virtballoon_probe(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb;
-	struct virtqueue *vqs[2];
-	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
-	const char *names[] = { "inflate", "deflate" };
-	int err;
+	struct virtqueue *vqs[3];
+	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
+	const char *names[] = { "inflate", "deflate", "stats" };
+	int err, nvqs;
 
 	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
 	if (!vb) {
@@ -219,14 +294,31 @@ static int virtballoon_probe(struct virtio_device *vdev)
 	vb->num_pages = 0;
 	init_waitqueue_head(&vb->config_change);
 	vb->vdev = vdev;
+	vb->need_stats_update = 0;
 
-	/* We expect two virtqueues. */
-	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+	/* We expect two virtqueues: inflate and deflate,
+	 * and optionally stat. */
+	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
+	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
 	if (err)
 		goto out_free_vb;
 
 	vb->inflate_vq = vqs[0];
 	vb->deflate_vq = vqs[1];
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+		struct scatterlist sg;
+		vb->stats_vq = vqs[2];
+
+		/*
+		 * Prime this virtqueue with one buffer so the hypervisor can
+		 * use it to signal us later.
+		 */
+		sg_init_one(&sg, vb->stats, sizeof vb->stats);
+		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
+						  &sg, 1, 0, vb) < 0)
+			BUG();
+		vb->stats_vq->vq_ops->kick(vb->stats_vq);
+	}
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
 	if (IS_ERR(vb->thread)) {
@@ -264,9 +356,12 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
 	kfree(vb);
 }
 
-static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
+static unsigned int features[] = {
+	VIRTIO_BALLOON_F_MUST_TELL_HOST,
+	VIRTIO_BALLOON_F_STATS_VQ,
+};
 
-static struct virtio_driver virtio_balloon = {
+static struct virtio_driver virtio_balloon_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name =	KBUILD_MODNAME,
@@ -279,12 +374,12 @@ static struct virtio_driver virtio_balloon = {
 
 static int __init init(void)
 {
-	return register_virtio_driver(&virtio_balloon);
+	return register_virtio_driver(&virtio_balloon_driver);
 }
 
 static void __exit fini(void)
 {
-	unregister_virtio_driver(&virtio_balloon);
+	unregister_virtio_driver(&virtio_balloon_driver);
 }
 module_init(init);
 module_exit(fini);
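
Note on the stats virtqueue introduced above: the comment before stats_request() describes a queue that runs "in reverse" — the guest primes it with one buffer, and from then on the host consumes that buffer whenever it wants fresh numbers, after which the guest refills it. The standalone userspace sketch below (not part of this commit; the types, field names, and tag values are hypothetical and chosen only for illustration) models that one-buffer handshake with a plain one-slot queue instead of a real virtqueue.

/* stats_vq_model.c — userspace model of the balloon stats-vq handshake. */
#include <stdio.h>
#include <stdint.h>

enum { S_SWAP_IN, S_SWAP_OUT, S_MEMFREE, S_NR };	/* hypothetical stat tags */

struct bstat {
	uint16_t tag;
	uint64_t val;
};

struct stats_vq {			/* one-slot stand-in for a virtqueue */
	struct bstat *buf;		/* NULL means the host has taken the buffer */
};

static struct bstat stats[S_NR];

static void update_stats(void)		/* stand-in for update_balloon_stats() */
{
	stats[S_SWAP_IN]  = (struct bstat){ S_SWAP_IN,  4096 };
	stats[S_SWAP_OUT] = (struct bstat){ S_SWAP_OUT, 8192 };
	stats[S_MEMFREE]  = (struct bstat){ S_MEMFREE,  123456789 };
}

/* Host side: pull the buffer out of the queue; the guest must now refill it. */
static struct bstat *host_request_stats(struct stats_vq *vq)
{
	struct bstat *b = vq->buf;

	vq->buf = NULL;
	return b;
}

/* Guest side: refresh the numbers and put the buffer back (add_buf + kick). */
static void guest_refill(struct stats_vq *vq)
{
	update_stats();
	vq->buf = stats;
}

int main(void)
{
	struct stats_vq vq = { 0 };
	int round;

	guest_refill(&vq);			/* prime with one buffer */

	for (round = 0; round < 3; round++) {
		struct bstat *s = host_request_stats(&vq);

		printf("round %d: memfree=%llu bytes\n",
		       round, (unsigned long long)s[S_MEMFREE].val);
		guest_refill(&vq);		/* like stats_handle_request() */
	}
	return 0;
}

In the driver itself the refill happens on the vballoon kthread because stats collection can sleep; the model above collapses that into a direct call purely to show the buffer ownership cycle.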
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 28d9cf7cf72f..24747aef1952 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
@@ -473,7 +474,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
 		info = vq->priv;
-		if (vp_dev->per_vq_vectors)
+		if (vp_dev->per_vq_vectors &&
+		    info->msix_vector != VIRTIO_MSI_NO_VECTOR)
 			free_irq(vp_dev->msix_entries[info->msix_vector].vector,
 				 vq);
 		vp_del_vq(vq);
@@ -648,6 +650,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
 		goto out_req_regions;
 
 	pci_set_drvdata(pci_dev, vp_dev);
+	pci_set_master(pci_dev);
 
 	/* we use the subsystem vendor/device id as the virtio vendor/device
 	 * id. this allows us to use the same PCI vendor/device id for all
@@ -702,7 +705,7 @@ static struct pci_driver virtio_pci_driver = {
 	.name		= "virtio-pci",
 	.id_table	= virtio_pci_id_table,
 	.probe		= virtio_pci_probe,
-	.remove		= virtio_pci_remove,
+	.remove		= __devexit_p(virtio_pci_remove),
 #ifdef CONFIG_PM
 	.suspend	= virtio_pci_suspend,
 	.resume		= virtio_pci_resume,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fbd2ecde93e4..0f90634bcb85 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -20,6 +20,25 @@
 #include <linux/virtio_ring.h>
 #include <linux/virtio_config.h>
 #include <linux/device.h>
+#include <linux/slab.h>
+
+/* virtio guest is communicating with a virtual "device" that actually runs on
+ * a host processor.  Memory barriers are used to control SMP effects. */
+#ifdef CONFIG_SMP
+/* Where possible, use SMP barriers which are more lightweight than mandatory
+ * barriers, because mandatory barriers control MMIO effects on accesses
+ * through relaxed memory I/O windows (which virtio does not use). */
+#define virtio_mb() smp_mb()
+#define virtio_rmb() smp_rmb()
+#define virtio_wmb() smp_wmb()
+#else
+/* We must force memory ordering even if guest is UP since host could be
+ * running on another CPU, but SMP barriers are defined to barrier() in that
+ * configuration.  So fall back to mandatory barriers instead. */
+#define virtio_mb() mb()
+#define virtio_rmb() rmb()
+#define virtio_wmb() wmb()
+#endif
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
@@ -36,10 +55,9 @@
 			panic("%s:in_use = %i\n",		\
 			      (_vq)->vq.name, (_vq)->in_use);	\
 		(_vq)->in_use = __LINE__;			\
-		mb();						\
 	} while (0)
 #define END_USE(_vq) \
-	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
+	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
 #else
 #define BAD_RING(_vq, fmt, args...)				\
 	do {							\
@@ -221,13 +239,13 @@ static void vring_kick(struct virtqueue *_vq)
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	wmb();
+	virtio_wmb();
 
 	vq->vring.avail->idx += vq->num_added;
 	vq->num_added = 0;
 
 	/* Need to update avail index before checking if we should notify */
-	mb();
+	virtio_mb();
 
 	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
 		/* Prod other side to tell it about changes. */
@@ -286,7 +304,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
 	}
 
 	/* Only get used array entries after they have been exposed by host. */
-	rmb();
+	virtio_rmb();
 
 	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
 	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
@@ -324,7 +342,7 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	/* We optimistically turn back on interrupts, then check if there was
 	 * more to do. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	mb();
+	virtio_mb();
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
 		return false;
@@ -334,6 +352,30 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	return true;
 }
 
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	unsigned int i;
+	void *buf;
+
+	START_USE(vq);
+
+	for (i = 0; i < vq->vring.num; i++) {
+		if (!vq->data[i])
+			continue;
+		/* detach_buf clears data, so grab it now. */
+		buf = vq->data[i];
+		detach_buf(vq, i);
+		END_USE(vq);
+		return buf;
+	}
+	/* That should have freed everything. */
+	BUG_ON(vq->num_free != vq->vring.num);
+
+	END_USE(vq);
+	return NULL;
+}
+
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -360,6 +402,7 @@ static struct virtqueue_ops vring_vq_ops = {
 	.kick = vring_kick,
 	.disable_cb = vring_disable_cb,
 	.enable_cb = vring_enable_cb,
+	.detach_unused_buf = vring_detach_unused_buf,
 };
 
 struct virtqueue *vring_new_virtqueue(unsigned int num,
@@ -406,8 +449,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	/* Put everything in free lists. */
 	vq->num_free = num;
 	vq->free_head = 0;
-	for (i = 0; i < num-1; i++)
+	for (i = 0; i < num-1; i++) {
 		vq->vring.desc[i].next = i+1;
+		vq->data[i] = NULL;
+	}
+	vq->data[i] = NULL;
 
 	return &vq->vq;
 }
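
Note on the barrier changes above: the new comments explain that SMP barriers are enough when the guest is SMP, but a UP guest still needs real barriers because the host runs on another CPU. The ordering itself is the one vring_kick() and vring_get_buf() rely on: write descriptors, barrier, publish the available index, barrier, then check the notify-suppression flag. The standalone C11 sketch below (userspace only; the struct layout and field names are illustrative, not the real vring ABI) shows that same publish-then-check discipline with explicit fences standing in for virtio_wmb()/virtio_mb().

/* ring_order_model.c — userspace model of the vring_kick() ordering. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

struct ring {
	uint64_t desc[RING_SIZE];	/* payload the other side will read */
	_Atomic uint16_t avail_idx;	/* how many entries are published */
	_Atomic uint16_t no_notify;	/* set by consumer to suppress kicks */
};

static void notify_other_side(void)
{
	puts("kick!");
}

/* Producer: mirrors the order of operations in vring_kick(). */
static void publish(struct ring *r, uint64_t value)
{
	uint16_t idx = atomic_load_explicit(&r->avail_idx, memory_order_relaxed);

	r->desc[idx % RING_SIZE] = value;

	/* virtio_wmb(): the descriptor must be visible before the new index. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->avail_idx, idx + 1, memory_order_relaxed);

	/* virtio_mb(): the index must be visible before we read no_notify,
	 * otherwise we could skip a kick the consumer still needs. */
	atomic_thread_fence(memory_order_seq_cst);
	if (!atomic_load_explicit(&r->no_notify, memory_order_relaxed))
		notify_other_side();
}

int main(void)
{
	struct ring r = { 0 };

	publish(&r, 42);			/* prints "kick!" */
	atomic_store(&r.no_notify, 1);
	publish(&r, 43);			/* notification suppressed */
	return 0;
}

On a UP kernel build the smp_* variants compile down to compiler barriers only, which is why this patch keeps full mb()/rmb()/wmb() there: the "other side" of the ring is the host, which may be running on a different physical CPU regardless of the guest's configuration.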