author    David S. Miller <davem@davemloft.net>    2010-09-10 00:59:51 -0400
committer David S. Miller <davem@davemloft.net>    2010-09-10 00:59:51 -0400
commit    053d8f6622701f849fda2ca2c9ae596c13599ba9 (patch)
tree      e5dd90cca3a69bc993b5aa860a9eeb8c9178450a /drivers
parent    c9cedbba0fc591e1c0587f838932ca3f3c6fec57 (diff)
parent    615cc2211c17ed05a2a5d94abdac6c340a8ea508 (diff)
Merge branch 'vhost-net' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/vhost/vhost.c    80
1 file changed, 58 insertions(+), 22 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117f3ecd..c579dcc9200c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
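For reference, the fields set up by the new vhost_work_init() belong to struct vhost_work in drivers/vhost/vhost.h. The sketch below is reconstructed from the initializers in this hunk, not copied from the header, so field order and the exact comments are assumptions:

struct vhost_work {
	struct list_head  node;       /* link used to queue the item to the worker */
	vhost_work_fn_t   fn;         /* callback run by the worker kthread */
	wait_queue_head_t done;       /* flushers sleep here until fn has run */
	int               flushing;   /* number of flushers currently waiting */
	unsigned          queue_seq;  /* incremented when the item is queued */
	unsigned          done_seq;   /* incremented when the item has completed */
};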
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
 	kthread_stop(worker);
+	dev->worker = NULL;
 err_worker:
 	if (dev->mm)
 		mmput(dev->mm);
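Taken together, the new helpers give vhost a small "run this function on the worker thread and wait for it" primitive; vhost_attach_cgroups() uses it so that the worker kthread itself, rather than the ioctl caller, is the task attached via cgroup_attach_task_all(). A minimal sketch of that pattern follows; my_fn, my_work and run_on_worker are hypothetical names, and only the vhost_work_* calls come from this patch:

/* Hypothetical caller, assumed to be built in the context of
 * drivers/vhost/vhost.h where struct vhost_work and the helpers live. */
static void my_fn(struct vhost_work *work)
{
	/* executes in the vhost worker kthread, not in the caller's context */
}

static void run_on_worker(struct vhost_dev *dev)
{
	struct vhost_work my_work;

	vhost_work_init(&my_work, my_fn);	/* set the callback, reset seq counters */
	vhost_work_queue(dev, &my_work);	/* hand the item to the worker */
	vhost_work_flush(dev, &my_work);	/* sleep until the worker has run it */
}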