 drivers/vhost/vhost.c  | 80 +++++++++++++++++++++++++++++++++-------------
 include/linux/cgroup.h | 11 ++++++++++-
 kernel/cgroup.c        |  9 +++++----
 3 files changed, 73 insertions(+), 27 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117f3ecd..c579dcc9200c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
 	kthread_stop(worker);
+	dev->worker = NULL;
 err_worker:
 	if (dev->mm)
 		mmput(dev->mm);
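The vhost_attach_cgroups() helper added above is the point of the change: the cgroup attach must run on the worker thread itself, since inside the work callback current is the worker kthread, so cgroup_attach_task_all(s->owner, current) moves the worker into the owner's cgroups. The same queue-then-flush idiom can run any callback synchronously in the worker's context. A minimal sketch of that idiom, assuming only the vhost_work_init()/vhost_work_queue()/vhost_work_flush() helpers introduced by this patch (my_sync_work and run_in_worker are hypothetical names, not part of the patch):

/* Hypothetical sketch: execute a callback synchronously on the vhost
 * worker thread, mirroring vhost_attach_cgroups() above.
 * my_sync_work and run_in_worker are made-up names for illustration. */
struct my_sync_work {
	struct vhost_work work;
	int ret;
};

static void my_sync_work_fn(struct vhost_work *work)
{
	struct my_sync_work *s;

	/* Runs in the worker kthread: current is the worker here. */
	s = container_of(work, struct my_sync_work, work);
	s->ret = 0;
}

static int run_in_worker(struct vhost_dev *dev)
{
	struct my_sync_work w;

	w.ret = -1;
	vhost_work_init(&w.work, my_sync_work_fn);
	vhost_work_queue(dev, &w.work);
	/* w lives on this stack frame: flush before returning. */
	vhost_work_flush(dev, &w.work);
	return w.ret;
}

Because the work is stack-allocated and flushed before the caller returns, no allocation or reference counting is needed; vhost_attach_cgroups() relies on exactly this.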
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e41c6e..5a53d8f039a2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,11 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +640,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+					 struct task_struct *t)
+{
+	return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
 	return 0;
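The header keeps cgroup_attach_task_current_cg() as a one-line inline wrapper, so existing callers compile unchanged, and both entry points collapse to no-op stubs when cgroups are disabled. A usage sketch of the new entry point (hedged: attach_worker, owner and worker are placeholder names, not kernel API):

/* Hypothetical example: move a freshly created kthread into all of
 * the cgroups of the task that owns a device.  attach_worker, owner
 * and worker are placeholder names for illustration. */
static int attach_worker(struct task_struct *owner,
			 struct task_struct *worker)
{
	/* New API: iterate every active hierarchy, attaching 'worker'
	 * to the cgroup that 'owner' occupies in each.  The old call,
	 * cgroup_attach_task_current_cg(worker), is now inline sugar
	 * for cgroup_attach_task_all(current, worker). */
	return cgroup_attach_task_all(owner, worker);
}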
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c5b0f9..ed19afd9e3fe 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,10 +1791,11 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
 	struct cgroupfs_root *root;
 	struct cgroup *cur_cg;
@@ -1802,7 +1803,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
 	cgroup_lock();
 	for_each_active_root(root) {
-		cur_cg = task_cgroup_from_root(current, root);
+		cur_cg = task_cgroup_from_root(from, root);
 		retval = cgroup_attach_task(cur_cg, tsk);
 		if (retval)
 			break;
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
