 drivers/virtio/virtio_balloon.c | 108 ++++++++++++++++++++++-------------------------
 1 file changed, 51 insertions(+), 57 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0c3691f46575..2c9a92f1e525 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -22,8 +22,7 @@
 #include <linux/virtio.h>
 #include <linux/virtio_balloon.h>
 #include <linux/swap.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
+#include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -49,11 +48,12 @@ struct virtio_balloon {
 	struct virtio_device *vdev;
 	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
 
-	/* Where the ballooning thread waits for config to change. */
-	wait_queue_head_t config_change;
+	/* The balloon servicing is delegated to a freezable workqueue. */
+	struct work_struct work;
 
-	/* The thread servicing the balloon. */
-	struct task_struct *thread;
+	/* Prevent updating balloon when it is being canceled. */
+	spinlock_t stop_update_lock;
+	bool stop_update;
 
 	/* Waiting for host to ack the pages we released. */
 	wait_queue_head_t acked;
@@ -135,9 +135,10 @@ static void set_page_pfns(u32 pfns[], struct page *page)
 		pfns[i] = page_to_balloon_pfn(page) + i;
 }
 
-static void fill_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
 {
 	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+	unsigned num_allocated_pages;
 
 	/* We can only do one array worth at a time. */
 	num = min(num, ARRAY_SIZE(vb->pfns));
@@ -162,10 +163,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
 		adjust_managed_page_count(page, -1);
 	}
 
+	num_allocated_pages = vb->num_pfns;
 	/* Did we get any? */
 	if (vb->num_pfns != 0)
 		tell_host(vb, vb->inflate_vq);
 	mutex_unlock(&vb->balloon_lock);
+
+	return num_allocated_pages;
 }
 
 static void release_pages_balloon(struct virtio_balloon *vb)
@@ -251,14 +255,19 @@ static void update_balloon_stats(struct virtio_balloon *vb)
  * with a single buffer. From that point forward, all conversations consist of
  * a hypervisor request (a call to this function) which directs us to refill
  * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
- * we notify our kthread which does the actual work via stats_handle_request().
+ * we delegate the job to a freezable workqueue that will do the actual work via
+ * stats_handle_request().
  */
 static void stats_request(struct virtqueue *vq)
 {
 	struct virtio_balloon *vb = vq->vdev->priv;
 
 	vb->need_stats_update = 1;
-	wake_up(&vb->config_change);
+
+	spin_lock(&vb->stop_update_lock);
+	if (!vb->stop_update)
+		queue_work(system_freezable_wq, &vb->work);
+	spin_unlock(&vb->stop_update_lock);
 }
 
 static void stats_handle_request(struct virtio_balloon *vb)
@@ -281,8 +290,12 @@ static void stats_handle_request(struct virtio_balloon *vb)
 static void virtballoon_changed(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
+	unsigned long flags;
 
-	wake_up(&vb->config_change);
+	spin_lock_irqsave(&vb->stop_update_lock, flags);
+	if (!vb->stop_update)
+		queue_work(system_freezable_wq, &vb->work);
+	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
 }
 
 static inline s64 towards_target(struct virtio_balloon *vb)
@@ -345,43 +358,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static int balloon(void *_vballoon)
+static void balloon(struct work_struct *work)
 {
-	struct virtio_balloon *vb = _vballoon;
-	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-
-	set_freezable();
-	while (!kthread_should_stop()) {
-		s64 diff;
-
-		try_to_freeze();
-
-		add_wait_queue(&vb->config_change, &wait);
-		for (;;) {
-			if ((diff = towards_target(vb)) != 0 ||
-			    vb->need_stats_update ||
-			    kthread_should_stop() ||
-			    freezing(current))
-				break;
-			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-		}
-		remove_wait_queue(&vb->config_change, &wait);
+	struct virtio_balloon *vb;
+	s64 diff;
 
-		if (vb->need_stats_update)
-			stats_handle_request(vb);
-		if (diff > 0)
-			fill_balloon(vb, diff);
-		else if (diff < 0)
-			leak_balloon(vb, -diff);
-		update_balloon_size(vb);
+	vb = container_of(work, struct virtio_balloon, work);
+	diff = towards_target(vb);
 
-		/*
-		 * For large balloon changes, we could spend a lot of time
-		 * and always have work to do. Be nice if preempt disabled.
-		 */
-		cond_resched();
-	}
-	return 0;
+	if (vb->need_stats_update)
+		stats_handle_request(vb);
+
+	if (diff > 0)
+		diff -= fill_balloon(vb, diff);
+	else if (diff < 0)
+		diff += leak_balloon(vb, -diff);
+	update_balloon_size(vb);
+
+	if (diff)
+		queue_work(system_freezable_wq, work);
 }
 
 static int init_vqs(struct virtio_balloon *vb)
@@ -499,9 +494,11 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		goto out;
 	}
 
+	INIT_WORK(&vb->work, balloon);
+	spin_lock_init(&vb->stop_update_lock);
+	vb->stop_update = false;
 	vb->num_pages = 0;
 	mutex_init(&vb->balloon_lock);
-	init_waitqueue_head(&vb->config_change);
 	init_waitqueue_head(&vb->acked);
 	vb->vdev = vdev;
 	vb->need_stats_update = 0;
@@ -523,16 +520,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	vb->thread = kthread_run(balloon, vb, "vballoon");
-	if (IS_ERR(vb->thread)) {
-		err = PTR_ERR(vb->thread);
-		goto out_del_vqs;
-	}
-
 	return 0;
 
-out_del_vqs:
-	unregister_oom_notifier(&vb->nb);
out_oom_notify:
 	vdev->config->del_vqs(vdev);
out_free_vb:
@@ -559,7 +548,12 @@ static void virtballoon_remove(struct virtio_device *vdev)
 	struct virtio_balloon *vb = vdev->priv;
 
 	unregister_oom_notifier(&vb->nb);
-	kthread_stop(vb->thread);
+
+	spin_lock_irq(&vb->stop_update_lock);
+	vb->stop_update = true;
+	spin_unlock_irq(&vb->stop_update_lock);
+	cancel_work_sync(&vb->work);
+
 	remove_common(vb);
 	kfree(vb);
 }
@@ -570,10 +564,9 @@ static int virtballoon_freeze(struct virtio_device *vdev)
 	struct virtio_balloon *vb = vdev->priv;
 
 	/*
-	 * The kthread is already frozen by the PM core before this
+	 * The workqueue is already frozen by the PM core before this
 	 * function is called.
 	 */
-
 	remove_common(vb);
 	return 0;
 }
@@ -589,7 +582,8 @@ static int virtballoon_restore(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	fill_balloon(vb, towards_target(vb));
+	if (towards_target(vb))
+		virtballoon_changed(vdev);
 	update_balloon_size(vb);
 	return 0;
 }