Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/xenfs/privcmd.c  400
-rw-r--r--  drivers/xen/xenfs/xenbus.c   593
2 files changed, 993 insertions(+), 0 deletions(-)
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
new file mode 100644
index 00000000000..dbd3b16fd13
--- /dev/null
+++ b/drivers/xen/xenfs/privcmd.c
@@ -0,0 +1,400 @@
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}
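
/*
 * Example usage from user space (illustrative sketch only; assumes the
 * privcmd node that xenfs exposes under /proc/xen, plus the usual Xen
 * public headers for the hypercall numbers):
 *
 *	int fd = open("/proc/xen/privcmd", O_RDWR);
 *	struct privcmd_hypercall call = {
 *		.op = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */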

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
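
/*
 * Capacity note: elements never straddle a page boundary, so each
 * page in the list holds PAGE_SIZE / size whole elements.  For
 * example, with 4096-byte pages and 32-byte entries that is 128
 * entries per page, so a 1000-entry array occupies 8 pages.
 */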

/*
 * Call function "fn" on each element of the array, which is
 * fragmented over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}
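
/*
 * traverse_pages() must be given the same nelem and size that were
 * passed to gather_array() when the list was built; it replays the
 * same "start a new page once pageidx would overflow" stepping on
 * the read side, so it never advances pos past the end of the list.
 */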

struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};

static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}

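/*
 * A failed frame is flagged in place: the 0xf0000000 bits set above
 * mark that entry of the caller's mfn array as bad.  If any frame
 * failed, the annotated array is copied back to user space via
 * mmap_return_errors() below, so the caller can see which ones.
 */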
static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}

static struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist,
				     mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translate guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

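/*
 * One-shot mapping guard: vm_private_data doubles as a "mapped"
 * flag.  The xchg() is atomic, so only the first caller to map a
 * given VMA sees NULL and succeeds; any repeat attempt on the same
 * VMA is rejected by the ioctl paths above.
 */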
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif

const struct file_operations privcmd_file_ops = {
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
new file mode 100644
index 00000000000..bbd000f88af
--- /dev/null
+++ b/drivers/xen/xenfs/xenbus.c
@@ -0,0 +1,593 @@
/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
 *                              and /proc/xen compatibility mount point.
 *                              Turned xenfs into a loadable module.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "xenfs.h"
#include "../xenbus/xenbus_comms.h"

#include <xen/xenbus.h>
#include <asm/xen/hypervisor.h>

/*
 * An element of a list of outstanding transactions, for which we're
 * still awaiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[PAGE_SIZE];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;

};

/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	unsigned i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;
		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		unsigned sz = min((unsigned)len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue.  Caller must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add
 * them to the appropriate list under lock once all the buffers have
 * been successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}

/*
 * Free all the read_buffers on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

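/*
 * watch_fired() below turns a xenbus watch callback into an
 * XS_WATCH_EVENT message on this file's read queue.  The body it
 * queues follows the xenstore wire format: the xsd_sockmsg header,
 * then the firing path (NUL-terminated), the user's token
 * (NUL-terminated), and any extra vector entries as additional data.
 */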
static void watch_fired(struct xenbus_watch *watch,
			const char **vec,
			unsigned int len)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *path, *token;
	int path_len, tok_len, body_len, data_len = 0;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	path = vec[XS_WATCH_PATH];
	token = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token) + 1;
	if (len > 2)
		data_len = vec[len] - vec[2] + 1;
	body_len = path_len + tok_len + data_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token, tok_len);
	if (!ret && len > 2)
		ret = queue_reply(&staging_q, vec[2], data_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}

static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	void *reply;
	struct xenbus_transaction_holder *trans = NULL;
	LIST_HEAD(staging_q);

	if (msg_type == XS_TRANSACTION_START) {
		trans = kmalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
	}

	reply = xenbus_dev_request_and_reply(&u->u.msg);
	if (IS_ERR(reply)) {
		kfree(trans);
		rc = PTR_ERR(reply);
		goto out;
	}

	if (msg_type == XS_TRANSACTION_START) {
		trans->handle.id = simple_strtoul(reply, NULL, 0);

		list_add(&trans->list, &u->transactions);
	} else if (msg_type == XS_TRANSACTION_END) {
		list_for_each_entry(trans, &u->transactions, list)
			if (trans->handle.id == u->u.msg.tx_id)
				break;
		BUG_ON(&trans->list == &u->transactions);
		list_del(&trans->list);

		kfree(trans);
	}

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
	if (!rc)
		rc = queue_reply(&staging_q, reply, u->u.msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(reply);

out:
	return rc;
}

static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch, *tmp_watch;
	char *path, *token;
	int err, rc;
	LIST_HEAD(staging_q);

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = -EILSEQ;
		goto out;
	}
	token++;

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	{
		struct {
			struct xsd_sockmsg hdr;
			char body[3];
		} __packed reply = {
			{
				.type = msg_type,
				.len = sizeof(reply.body)
			},
			"OK"
		};

		mutex_lock(&u->reply_mutex);
		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
		wake_up(&u->read_waitq);
		mutex_unlock(&u->reply_mutex);
	}

out:
	return rc;
}

static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer */
	if ((len + u->len) > sizeof(u->u.buffer)) {
		/* On error, dump existing buffer */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet */
	if (u->len < sizeof(u->u.msg))
		goto out;	/* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and return an error. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out;	/* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0)
		rc = ret;

	/* Buffered message consumed */
	u->len = 0;

 out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}
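
/*
 * Example usage from user space (illustrative sketch only; the path
 * and token names are made up, and the length of 15 counts both NUL
 * terminators): to set a watch, write one complete XS_WATCH message,
 * then poll for POLLIN and read() back the synthesized "OK" reply.
 * Partial writes are fine; the driver buffers them until the message
 * is complete.
 *
 *	struct xsd_sockmsg hdr = { .type = XS_WATCH, .len = 15 };
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, "backend\0token1", 15);
 */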

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	nonseekable_open(inode, filp);

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */

	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);

	return 0;
}

static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return POLLIN | POLLRDNORM;
	return 0;
}

const struct file_operations xenbus_file_ops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};