author		Michael S. Tsirkin <mst@redhat.com>	2010-09-02 07:05:30 -0400
committer	Michael S. Tsirkin <mst@redhat.com>	2010-09-06 02:49:31 -0400
commit		87d6a412bd1ed82c14cabd4b408003b23bbd2880 (patch)
tree		598f8fc4a19f9b52e531ab41a1c45796358956d1
parent		73457f0f836956747e0394320be2163c050e96ef (diff)
vhost: fix attach to cgroups regression
Since 2.6.36-rc1, non-root users of vhost-net fail to attach
if they are in any cgroups.
The reason is that when qemu uses vhost, vhost wants to attach
its thread to all cgroups that qemu has. But we got the API backwards,
so a non-privileged process (qemu) tried to control
the privileged one (vhost), which fails.
Fix this by switching to the new cgroup_attach_task_all,
and running it from the vhost thread.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
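
The fix reuses vhost's work-queue primitives: a work item is initialized,
queued to the device's worker thread, and then flushed, so the caller blocks
until the worker has executed the callback and can read a result back out of
the stack-allocated item. Below is a minimal userspace analogue of that
init/queue/flush pattern, with pthreads standing in for the kernel machinery;
all names here are illustrative, not the vhost API:

	#include <pthread.h>
	#include <stdio.h>

	/* Analogue of struct vhost_work: a callback plus completion state. */
	struct work {
		void (*fn)(struct work *);
		int done;
		pthread_mutex_t lock;
		pthread_cond_t cond;
	};

	/* One-slot "queue" feeding the worker; the kernel uses a list. */
	static struct work *pending;
	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;

	static void work_init(struct work *w, void (*fn)(struct work *))
	{
		w->fn = fn;
		w->done = 0;
		pthread_mutex_init(&w->lock, NULL);
		pthread_cond_init(&w->cond, NULL);
	}

	static void work_queue(struct work *w)
	{
		pthread_mutex_lock(&queue_lock);
		pending = w;
		pthread_cond_signal(&queue_cond);
		pthread_mutex_unlock(&queue_lock);
	}

	/* Analogue of vhost_work_flush(): block until the worker ran it. */
	static void work_flush(struct work *w)
	{
		pthread_mutex_lock(&w->lock);
		while (!w->done)
			pthread_cond_wait(&w->cond, &w->lock);
		pthread_mutex_unlock(&w->lock);
	}

	static void *worker_fn(void *arg)
	{
		struct work *w;

		(void)arg;
		pthread_mutex_lock(&queue_lock);
		while (!pending)
			pthread_cond_wait(&queue_cond, &queue_lock);
		w = pending;
		pending = NULL;
		pthread_mutex_unlock(&queue_lock);

		w->fn(w);	/* the callback runs in the worker's context */

		pthread_mutex_lock(&w->lock);
		w->done = 1;
		pthread_cond_signal(&w->cond);
		pthread_mutex_unlock(&w->lock);
		return NULL;
	}

	/* Analogue of vhost_attach_cgroups_struct: work plus result slot. */
	struct attach_work {
		struct work work;	/* first member, see the cast below */
		int ret;
	};

	static void do_attach(struct work *w)
	{
		struct attach_work *a = (struct attach_work *)w;

		/* The kernel calls cgroup_attach_task_all(s->owner, current)
		 * here: the worker attaches *itself* to the owner's cgroups,
		 * inverting the broken qemu-controls-vhost direction. */
		a->ret = 0;
	}

	int main(void)
	{
		pthread_t worker;
		struct attach_work a;	/* on the stack, like in the patch */

		pthread_create(&worker, NULL, worker_fn, NULL);
		work_init(&a.work, do_attach);
		work_queue(&a.work);	/* hand the job to the worker... */
		work_flush(&a.work);	/* ...and wait until it has run */
		printf("attach returned %d\n", a.ret);
		pthread_join(&worker, NULL);
		return 0;
	}

The point mirrored from the patch: do_attach() executes in the worker's
context, so in the kernel it is the privileged thread that attaches itself to
the owner's cgroups, rather than being controlled by the unprivileged owner.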
-rw-r--r--	drivers/vhost/vhost.c	79
1 file changed, 57 insertions(+), 22 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117f3ecd..1afa08527e08 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
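
Factoring vhost_work_init() out of vhost_poll_init() is what allows a bare
struct vhost_work to be set up with no enclosing vhost_poll; the cgroup-attach
helper added further down in this patch relies on exactly that:

	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);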
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
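
vhost_work_flush() generalizes the old vhost_poll_flush() body: it snapshots
the work's queue_seq under work_lock, bumps the flushing count, then sleeps
until done_seq has caught up with the snapshot. The worker's half of that
handshake is outside this hunk; schematically it publishes done_seq and wakes
flushers after each callback, along these lines (a sketch only, with a
hypothetical dequeue helper, not the literal vhost_worker() code):

	for (;;) {
		struct vhost_work *work = fetch_next_work(dev); /* hypothetical */
		unsigned seq;

		if (!work)
			break;
		spin_lock_irq(&dev->work_lock);
		seq = work->queue_seq;	/* which queueing this run satisfies */
		spin_unlock_irq(&dev->work_lock);

		work->fn(work);		/* run the callback */

		spin_lock_irq(&dev->work_lock);
		work->done_seq = seq;	/* publish completion */
		if (work->flushing)
			wake_up_all(&work->done); /* let flushers re-check */
		spin_unlock_irq(&dev->work_lock);
	}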
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
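
Note that the work item in vhost_attach_cgroups() lives on the stack. That is
safe only because vhost_work_flush() does not return until the worker has
finished running the callback, so attach.ret is fully written before the
frame is torn down and before the caller reads it.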
@@ -255,10 +289,11 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker); /* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker); /* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
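
The wake_up_process() call moves ahead of the cgroup attach for a reason: the
attach now runs as a work item on the worker thread itself, so the thread has
to be awake and processing its queue before vhost_attach_cgroups() flushes;
otherwise the flush would block forever on a worker that was never started.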