Diffstat (limited to 'drivers/xen')
 drivers/xen/Makefile        |   5
 drivers/xen/evtchn.c        | 100
 drivers/xen/xenfs/privcmd.c |  14
 drivers/xen/xenfs/super.c   |  50
 4 files changed, 94 insertions(+), 75 deletions(-)
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index eb8a78d77d9d..533a199e7a3f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -8,9 +8,12 @@ obj-$(CONFIG_BLOCK) += biomerge.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= cpu_hotplug.o
 obj-$(CONFIG_XEN_XENCOMM)		+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)		+= balloon.o
-obj-$(CONFIG_XEN_DEV_EVTCHN)		+= evtchn.o
+obj-$(CONFIG_XEN_DEV_EVTCHN)		+= xen-evtchn.o
 obj-$(CONFIG_XENFS)			+= xenfs/
 obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o
 obj-$(CONFIG_XEN_PLATFORM_PCI)		+= platform-pci.o
 obj-$(CONFIG_SWIOTLB_XEN)		+= swiotlb-xen.o
 obj-$(CONFIG_XEN_DOM0)			+= pci.o
+
+xen-evtchn-y				:= evtchn.o
+
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index fec6ba3c08a8..ef11daf0cafe 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -69,20 +69,51 @@ struct per_user_data {
 	const char *name;
 };
 
-/* Who's bound to each port? */
-static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+/*
+ * Who's bound to each port? This is logically an array of struct
+ * per_user_data *, but we encode the current enabled-state in bit 0.
+ */
+static unsigned long *port_user;
 static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
 
-irqreturn_t evtchn_interrupt(int irq, void *data)
+static inline struct per_user_data *get_port_user(unsigned port)
+{
+	return (struct per_user_data *)(port_user[port] & ~1);
+}
+
+static inline void set_port_user(unsigned port, struct per_user_data *u)
+{
+	port_user[port] = (unsigned long)u;
+}
+
+static inline bool get_port_enabled(unsigned port)
+{
+	return port_user[port] & 1;
+}
+
+static inline void set_port_enabled(unsigned port, bool enabled)
+{
+	if (enabled)
+		port_user[port] |= 1;
+	else
+		port_user[port] &= ~1;
+}
+
+static irqreturn_t evtchn_interrupt(int irq, void *data)
 {
 	unsigned int port = (unsigned long)data;
 	struct per_user_data *u;
 
 	spin_lock(&port_user_lock);
 
-	u = port_user[port];
+	u = get_port_user(port);
+
+	WARN(!get_port_enabled(port),
+	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
+	     port, u);
 
 	disable_irq_nosync(irq);
+	set_port_enabled(port, false);
 
 	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
 		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
@@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data)
 			kill_fasync(&u->evtchn_async_queue,
 				    SIGIO, POLL_IN);
 		}
-	} else {
+	} else
 		u->ring_overflow = 1;
-	}
 
 	spin_unlock(&port_user_lock);
 
@@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
 		goto out;
 
 	spin_lock_irq(&port_user_lock);
-	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
-		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
-			enable_irq(irq_from_evtchn(kbuf[i]));
+
+	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
+		unsigned port = kbuf[i];
+
+		if (port < NR_EVENT_CHANNELS &&
+		    get_port_user(port) == u &&
+		    !get_port_enabled(port)) {
+			set_port_enabled(port, true);
+			enable_irq(irq_from_evtchn(port));
+		}
+	}
+
 	spin_unlock_irq(&port_user_lock);
 
 	rc = count;
@@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
 	 * interrupt handler yet, and our caller has already
 	 * serialized bind operations.)
 	 */
-	BUG_ON(port_user[port] != NULL);
-	port_user[port] = u;
+	BUG_ON(get_port_user(port) != NULL);
+	set_port_user(port, u);
+	set_port_enabled(port, true); /* start enabled */
 
 	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
 				       u->name, (void *)(unsigned long)port);
@@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
 
 	unbind_from_irqhandler(irq, (void *)(unsigned long)port);
 
-	/* make sure we unbind the irq handler before clearing the port */
-	barrier();
-
-	port_user[port] = NULL;
+	set_port_user(port, NULL);
 }
 
 static long evtchn_ioctl(struct file *file,
@@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file,
 		spin_lock_irq(&port_user_lock);
 
 		rc = -ENOTCONN;
-		if (port_user[unbind.port] != u) {
+		if (get_port_user(unbind.port) != u) {
 			spin_unlock_irq(&port_user_lock);
 			break;
 		}
 
-		evtchn_unbind_from_user(u, unbind.port);
+		disable_irq(irq_from_evtchn(unbind.port));
 
 		spin_unlock_irq(&port_user_lock);
 
+		evtchn_unbind_from_user(u, unbind.port);
+
 		rc = 0;
 		break;
 	}
@@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file,
 
 		if (notify.port >= NR_EVENT_CHANNELS) {
 			rc = -EINVAL;
-		} else if (port_user[notify.port] != u) {
+		} else if (get_port_user(notify.port) != u) {
 			rc = -ENOTCONN;
 		} else {
 			notify_remote_via_evtchn(notify.port);
@@ -431,7 +470,7 @@ static int evtchn_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = u;
 
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static int evtchn_release(struct inode *inode, struct file *filp)
@@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp)
 	free_page((unsigned long)u->ring);
 
 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-		if (port_user[i] != u)
+		if (get_port_user(i) != u)
 			continue;
 
-		evtchn_unbind_from_user(port_user[i], i);
+		disable_irq(irq_from_evtchn(i));
 	}
 
 	spin_unlock_irq(&port_user_lock);
 
+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+		if (get_port_user(i) != u)
+			continue;
+
+		evtchn_unbind_from_user(get_port_user(i), i);
+	}
+
 	kfree(u->name);
 	kfree(u);
 
@@ -467,12 +513,12 @@ static const struct file_operations evtchn_fops = {
 	.fasync  = evtchn_fasync,
 	.open    = evtchn_open,
 	.release = evtchn_release,
-	.llseek  = noop_llseek,
+	.llseek  = no_llseek,
 };
 
 static struct miscdevice evtchn_miscdev = {
 	.minor        = MISC_DYNAMIC_MINOR,
-	.name         = "evtchn",
+	.name         = "xen/evtchn",
 	.fops         = &evtchn_fops,
 };
 static int __init evtchn_init(void)
@@ -482,8 +528,11 @@ static int __init evtchn_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL);
+	if (port_user == NULL)
+		return -ENOMEM;
+
 	spin_lock_init(&port_user_lock);
-	memset(port_user, 0, sizeof(port_user));
 
 	/* Create '/dev/misc/evtchn'. */
 	err = misc_register(&evtchn_miscdev);
@@ -499,6 +548,9 @@ static int __init evtchn_init(void)
 
 static void __exit evtchn_cleanup(void)
 {
+	kfree(port_user);
+	port_user = NULL;
+
 	misc_deregister(&evtchn_miscdev);
 }
 
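The evtchn.c changes above replace the plain per_user_data pointer array with pointer-tagged slots: bit 0 of each port_user[] entry carries the per-port "enabled" flag, and the get/set helpers hide the tagging. The standalone C sketch below illustrates that tagging scheme outside the kernel. The helper names mirror the patch; the 16-entry array, uintptr_t storage, and main() driver are illustrative assumptions only, not part of the patch.

/*
 * Standalone sketch (not kernel code): an aligned pointer always has a
 * zero low bit, so bit 0 of each slot can carry a per-port enabled flag
 * alongside the per_user_data pointer, as port_user[] does in the patch.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct per_user_data { const char *name; };

static uintptr_t port_user[16];		/* tagged slots, all start empty */

static struct per_user_data *get_port_user(unsigned port)
{
	return (struct per_user_data *)(port_user[port] & ~(uintptr_t)1);
}

static void set_port_user(unsigned port, struct per_user_data *u)
{
	port_user[port] = (uintptr_t)u;	/* also clears the enabled bit */
}

static bool get_port_enabled(unsigned port)
{
	return port_user[port] & 1;
}

static void set_port_enabled(unsigned port, bool enabled)
{
	if (enabled)
		port_user[port] |= 1;
	else
		port_user[port] &= ~(uintptr_t)1;
}

int main(void)
{
	struct per_user_data u = { "demo" };

	/* the scheme requires at least 2-byte alignment of the pointer */
	assert(((uintptr_t)&u & 1) == 0);

	set_port_user(3, &u);
	set_port_enabled(3, true);
	printf("port 3: user=%s enabled=%d\n",
	       get_port_user(3)->name, (int)get_port_enabled(3));

	set_port_enabled(3, false);
	printf("port 3: user=%s enabled=%d\n",
	       get_port_user(3)->name, (int)get_port_enabled(3));
	return 0;
}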
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
index f80be7f6eb95..dbd3b16fd131 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/xenfs/privcmd.c
@@ -15,7 +15,6 @@
 #include <linux/mman.h>
 #include <linux/uaccess.h>
 #include <linux/swap.h>
-#include <linux/smp_lock.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/seq_file.h>
@@ -266,9 +265,7 @@ static int mmap_return_errors(void *data, void *state)
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
 
-	put_user(*mfnp, st->user++);
-
-	return 0;
+	return put_user(*mfnp, st->user++);
 }
 
 static struct vm_operations_struct privcmd_vm_ops;
@@ -323,10 +320,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 	up_write(&mm->mmap_sem);
 
 	if (state.err > 0) {
-		ret = 0;
-
 		state.user = m.arr;
-		traverse_pages(m.num, sizeof(xen_pfn_t),
+		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
 			       &pagelist,
 			       mmap_return_errors, &state);
 	}
@@ -384,8 +379,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return -ENOSYS;
 
-	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
-	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
+	 * how to recreate these mappings */
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
 	vma->vm_ops = &privcmd_vm_ops;
 	vma->vm_private_data = NULL;
 
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index d6662b789b6b..1aa389719846 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -12,8 +12,6 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/magic.h>
-#include <linux/mm.h>
-#include <linux/backing-dev.h>
 
 #include <xen/xen.h>
 
@@ -24,28 +22,12 @@
 MODULE_DESCRIPTION("Xen filesystem");
 MODULE_LICENSE("GPL");
 
-static int xenfs_set_page_dirty(struct page *page)
-{
-	return !TestSetPageDirty(page);
-}
-
-static const struct address_space_operations xenfs_aops = {
-	.set_page_dirty = xenfs_set_page_dirty,
-};
-
-static struct backing_dev_info xenfs_backing_dev_info = {
-	.ra_pages = 0,	/* No readahead */
-	.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
 {
 	struct inode *ret = new_inode(sb);
 
 	if (ret) {
 		ret->i_mode = mode;
-		ret->i_mapping->a_ops = &xenfs_aops;
-		ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
 		ret->i_uid = ret->i_gid = 0;
 		ret->i_blocks = 0;
 		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
@@ -121,41 +103,27 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
 	return rc;
 }
 
-static int xenfs_get_sb(struct file_system_type *fs_type,
-			int flags, const char *dev_name,
-			void *data, struct vfsmount *mnt)
+static struct dentry *xenfs_mount(struct file_system_type *fs_type,
+				  int flags, const char *dev_name,
+				  void *data)
 {
-	return get_sb_single(fs_type, flags, data, xenfs_fill_super, mnt);
+	return mount_single(fs_type, flags, data, xenfs_fill_super);
 }
 
 static struct file_system_type xenfs_type = {
 	.owner =	THIS_MODULE,
 	.name =		"xenfs",
-	.get_sb =	xenfs_get_sb,
+	.mount =	xenfs_mount,
 	.kill_sb =	kill_litter_super,
 };
 
 static int __init xenfs_init(void)
 {
-	int err;
-	if (!xen_domain()) {
-		printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
-		return 0;
-	}
-
-	err = register_filesystem(&xenfs_type);
-	if (err) {
-		printk(KERN_ERR "xenfs: Unable to register filesystem!\n");
-		goto out;
-	}
-
-	err = bdi_init(&xenfs_backing_dev_info);
-	if (err)
-		unregister_filesystem(&xenfs_type);
-
- out:
+	if (xen_domain())
+		return register_filesystem(&xenfs_type);
 
-	return err;
+	printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
+	return 0;
 }
 
 static void __exit xenfs_exit(void)