Diffstat (limited to 'drivers/virtio'):
 -rw-r--r--  drivers/virtio/virtio_balloon.c | 109
 -rw-r--r--  drivers/virtio/virtio_pci.c     |   2
 -rw-r--r--  drivers/virtio/virtio_ring.c    |  59
 3 files changed, 154 insertions(+), 16 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 505be88c82ae..369f2eebbad1 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,7 +28,7 @@
 struct virtio_balloon
 {
        struct virtio_device *vdev;
-       struct virtqueue *inflate_vq, *deflate_vq;
+       struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
 
        /* Where the ballooning thread waits for config to change. */
        wait_queue_head_t config_change;
@@ -49,6 +49,10 @@ struct virtio_balloon
        /* The array of pfns we tell the Host about. */
        unsigned int num_pfns;
        u32 pfns[256];
+
+       /* Memory statistics */
+       int need_stats_update;
+       struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
 };
 
 static struct virtio_device_id id_table[] = {
@@ -154,6 +158,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
        }
 }
 
+static inline void update_stat(struct virtio_balloon *vb, int idx,
+                              u16 tag, u64 val)
+{
+       BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
+       vb->stats[idx].tag = tag;
+       vb->stats[idx].val = val;
+}
+
+#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
+
+static void update_balloon_stats(struct virtio_balloon *vb)
+{
+       unsigned long events[NR_VM_EVENT_ITEMS];
+       struct sysinfo i;
+       int idx = 0;
+
+       all_vm_events(events);
+       si_meminfo(&i);
+
+       update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+                   pages_to_bytes(events[PSWPIN]));
+       update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+                   pages_to_bytes(events[PSWPOUT]));
+       update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+       update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+       update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+                   pages_to_bytes(i.freeram));
+       update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+                   pages_to_bytes(i.totalram));
+}
+
+/*
+ * While most virtqueues communicate guest-initiated requests to the hypervisor,
+ * the stats queue operates in reverse. The driver initializes the virtqueue
+ * with a single buffer. From that point forward, all conversations consist of
+ * a hypervisor request (a call to this function) which directs us to refill
+ * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
+ * we notify our kthread which does the actual work via stats_handle_request().
+ */
+static void stats_request(struct virtqueue *vq)
+{
+       struct virtio_balloon *vb;
+       unsigned int len;
+
+       vb = vq->vq_ops->get_buf(vq, &len);
+       if (!vb)
+               return;
+       vb->need_stats_update = 1;
+       wake_up(&vb->config_change);
+}
+
+static void stats_handle_request(struct virtio_balloon *vb)
+{
+       struct virtqueue *vq;
+       struct scatterlist sg;
+
+       vb->need_stats_update = 0;
+       update_balloon_stats(vb);
+
+       vq = vb->stats_vq;
+       sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+       if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+               BUG();
+       vq->vq_ops->kick(vq);
+}
+
 static void virtballoon_changed(struct virtio_device *vdev)
 {
        struct virtio_balloon *vb = vdev->priv;
@@ -190,8 +260,11 @@ static int balloon(void *_vballoon)
                try_to_freeze();
                wait_event_interruptible(vb->config_change,
                                         (diff = towards_target(vb)) != 0
+                                        || vb->need_stats_update
                                         || kthread_should_stop()
                                         || freezing(current));
+               if (vb->need_stats_update)
+                       stats_handle_request(vb);
                if (diff > 0)
                        fill_balloon(vb, diff);
                else if (diff < 0)
@@ -204,10 +277,10 @@ static int balloon(void *_vballoon)
 static int virtballoon_probe(struct virtio_device *vdev)
 {
        struct virtio_balloon *vb;
-       struct virtqueue *vqs[2];
-       vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
-       const char *names[] = { "inflate", "deflate" };
-       int err;
+       struct virtqueue *vqs[3];
+       vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
+       const char *names[] = { "inflate", "deflate", "stats" };
+       int err, nvqs;
 
        vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
        if (!vb) {
@@ -219,14 +292,31 @@ static int virtballoon_probe(struct virtio_device *vdev)
        vb->num_pages = 0;
        init_waitqueue_head(&vb->config_change);
        vb->vdev = vdev;
+       vb->need_stats_update = 0;
 
-       /* We expect two virtqueues. */
-       err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+       /* We expect two virtqueues: inflate and deflate,
+        * and optionally a stats queue. */
+       nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
+       err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto out_free_vb;
 
        vb->inflate_vq = vqs[0];
        vb->deflate_vq = vqs[1];
+       if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+               struct scatterlist sg;
+               vb->stats_vq = vqs[2];
+
+               /*
+                * Prime this virtqueue with one buffer so the hypervisor can
+                * use it to signal us later.
+                */
+               sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+               if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
+                                                 &sg, 1, 0, vb) < 0)
+                       BUG();
+               vb->stats_vq->vq_ops->kick(vb->stats_vq);
+       }
 
        vb->thread = kthread_run(balloon, vb, "vballoon");
        if (IS_ERR(vb->thread)) {
@@ -264,7 +354,10 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
        kfree(vb);
 }
 
-static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
+static unsigned int features[] = {
+       VIRTIO_BALLOON_F_MUST_TELL_HOST,
+       VIRTIO_BALLOON_F_STATS_VQ,
+};
 
 static struct virtio_driver virtio_balloon_driver = {
        .feature_table = features,
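
For reference, the (tag, value) entries that update_balloon_stats() fills in follow the layout declared in include/linux/virtio_balloon.h. A simplified sketch of that ABI as this driver assumes it (see the real header for the authoritative definitions):

/* Sketch of the balloon stats wire format assumed by the code above;
 * simplified from include/linux/virtio_balloon.h of this era. */
#define VIRTIO_BALLOON_S_SWAP_IN        0       /* Memory swapped in (bytes) */
#define VIRTIO_BALLOON_S_SWAP_OUT       1       /* Memory swapped out (bytes) */
#define VIRTIO_BALLOON_S_MAJFLT         2       /* Number of major faults */
#define VIRTIO_BALLOON_S_MINFLT         3       /* Number of minor faults */
#define VIRTIO_BALLOON_S_MEMFREE        4       /* Free memory (bytes) */
#define VIRTIO_BALLOON_S_MEMTOT         5       /* Total memory (bytes) */
#define VIRTIO_BALLOON_S_NR             6

struct virtio_balloon_stat {
        u16 tag;        /* one of the VIRTIO_BALLOON_S_* values */
        u64 val;
} __attribute__((packed));      /* packed: the host parses this exact byte layout */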
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 28d9cf7cf72f..1d5191fab62e 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -702,7 +702,7 @@ static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
-       .remove         = virtio_pci_remove,
+       .remove         = __devexit_p(virtio_pci_remove),
 #ifdef CONFIG_PM
        .suspend        = virtio_pci_suspend,
        .resume         = virtio_pci_resume,
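
The switch to __devexit_p() matters because virtio_pci_remove() is marked __devexit: when the kernel is built without hotplug support, __devexit functions are discarded, so a bare function pointer would reference discarded code. Simplified from include/linux/init.h of this era (shown for context, not part of the patch):

#ifdef CONFIG_HOTPLUG
#define __devexit_p(x)  x       /* removal possible: keep the real pointer */
#else
#define __devexit_p(x)  NULL    /* __devexit code discarded: pointer must be NULL */
#endif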
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fbd2ecde93e4..0db906b3c95d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -21,6 +21,24 @@
 #include <linux/virtio_config.h>
 #include <linux/device.h>
 
+/* virtio guest is communicating with a virtual "device" that actually runs on
+ * a host processor. Memory barriers are used to control SMP effects. */
+#ifdef CONFIG_SMP
+/* Where possible, use SMP barriers which are more lightweight than mandatory
+ * barriers, because mandatory barriers control MMIO effects on accesses
+ * through relaxed memory I/O windows (which virtio does not use). */
+#define virtio_mb() smp_mb()
+#define virtio_rmb() smp_rmb()
+#define virtio_wmb() smp_wmb()
+#else
+/* We must force memory ordering even if guest is UP since host could be
+ * running on another CPU, but SMP barriers are defined to barrier() in that
+ * configuration. So fall back to mandatory barriers instead. */
+#define virtio_mb() mb()
+#define virtio_rmb() rmb()
+#define virtio_wmb() wmb()
+#endif
+
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...)                             \
@@ -36,10 +54,9 @@
                panic("%s:in_use = %i\n",                       \
                      (_vq)->vq.name, (_vq)->in_use);           \
                (_vq)->in_use = __LINE__;                       \
-               mb();                                           \
        } while (0)
 #define END_USE(_vq) \
-       do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
+       do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
 #else
 #define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
@@ -221,13 +238,13 @@ static void vring_kick(struct virtqueue *_vq)
        START_USE(vq);
        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
-       wmb();
+       virtio_wmb();
 
        vq->vring.avail->idx += vq->num_added;
        vq->num_added = 0;
 
        /* Need to update avail index before checking if we should notify */
-       mb();
+       virtio_mb();
 
        if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
                /* Prod other side to tell it about changes. */
@@ -286,7 +303,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
        }
 
        /* Only get used array entries after they have been exposed by host. */
-       rmb();
+       virtio_rmb();
 
        i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
        *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
@@ -324,7 +341,7 @@ static bool vring_enable_cb(struct virtqueue *_vq)
        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-       mb();
+       virtio_mb();
        if (unlikely(more_used(vq))) {
                END_USE(vq);
                return false;
@@ -334,6 +351,30 @@ static bool vring_enable_cb(struct virtqueue *_vq)
        return true;
 }
 
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+       unsigned int i;
+       void *buf;
+
+       START_USE(vq);
+
+       for (i = 0; i < vq->vring.num; i++) {
+               if (!vq->data[i])
+                       continue;
+               /* detach_buf clears data, so grab it now. */
+               buf = vq->data[i];
+               detach_buf(vq, i);
+               END_USE(vq);
+               return buf;
+       }
+       /* That should have freed everything. */
+       BUG_ON(vq->num_free != vq->vring.num);
+
+       END_USE(vq);
+       return NULL;
+}
+
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
@@ -360,6 +401,7 @@ static struct virtqueue_ops vring_vq_ops = {
        .kick = vring_kick,
        .disable_cb = vring_disable_cb,
        .enable_cb = vring_enable_cb,
+       .detach_unused_buf = vring_detach_unused_buf,
 };
 
 struct virtqueue *vring_new_virtqueue(unsigned int num,
@@ -406,8 +448,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
        /* Put everything in free lists. */
        vq->num_free = num;
        vq->free_head = 0;
-       for (i = 0; i < num-1; i++)
+       for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
+               vq->data[i] = NULL;
+       }
+       vq->data[i] = NULL;
 
        return &vq->vq;
 }
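
The virtio_wmb()/virtio_rmb() pair introduced above implements the usual publish/consume protocol on the shared ring: write the entries, barrier, then advance the index; the reading side checks the index before touching the entries it guards. A minimal illustration of the same pattern using the macros defined in this patch (demo_ring, demo_publish, and demo_consume are hypothetical names, not kernel API):

struct demo_ring {
        unsigned int idx;       /* count of published entries */
        void *entry[256];       /* payload slots */
};

static void demo_publish(struct demo_ring *r, void *buf)
{
        r->entry[r->idx % 256] = buf;   /* 1. fill the slot */
        virtio_wmb();                   /* 2. slot contents become visible... */
        r->idx++;                       /* 3. ...before the index exposes them */
}

static void *demo_consume(struct demo_ring *r, unsigned int *last)
{
        if (*last == r->idx)            /* nothing new published */
                return NULL;
        virtio_rmb();                   /* read index before the entry it guards */
        return r->entry[(*last)++ % 256];
}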
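
The new detach_unused_buf operation lets a driver reclaim buffers the host never consumed, which is typically needed in a remove path after the device has been reset. A hypothetical teardown sketch (example_free_unused_bufs is an illustrative name; kfree() assumes the buffers were kmalloc'ed):

static void example_free_unused_bufs(struct virtqueue *vq)
{
        void *buf;

        /* After vdev->config->reset() the host no longer touches the ring,
         * so every still-pending buffer can be pulled back and freed. */
        while ((buf = vq->vq_ops->detach_unused_buf(vq)) != NULL)
                kfree(buf);
}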