author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-10 13:09:59 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-10 13:09:59 -0500
commit     90160371b3a3e67ef78d68210a94dd30664a703d (patch)
tree       2841ea811be129133cf9b83d9c3badd96e7ffab4
parent     ae5cfc0546ca2698b9dcddf72accbd70e57590a0 (diff)
parent     6c254de16a1d14c1ac931d3aa08dc88ac9fc582b (diff)
Merge branch 'stable/for-linus-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
* 'stable/for-linus-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen: (37 commits)
  xen/pciback: Expand the warning message to include domain id.
  xen/pciback: Fix "device has been assigned to X domain!" warning
  xen/pciback: Move the PCI_DEV_FLAGS_ASSIGNED ops to the "[un|]bind"
  xen/xenbus: don't reimplement kvasprintf via a fixed size buffer
  xenbus: maximum buffer size is XENSTORE_PAYLOAD_MAX
  xen/xenbus: Reject replies with payload > XENSTORE_PAYLOAD_MAX.
  Xen: consolidate and simplify struct xenbus_driver instantiation
  xen-gntalloc: introduce missing kfree
  xen/xenbus: Fix compile error - missing header for xen_initial_domain()
  xen/netback: Enable netback on HVM guests
  xen/grant-table: Support mappings required by blkback
  xenbus: Use grant-table wrapper functions
  xenbus: Support HVM backends
  xen/xenbus-frontend: Fix compile error with randconfig
  xen/xenbus-frontend: Make error message more clear
  xen/privcmd: Remove unused support for arch specific privcmp mmap
  xen: Add xenbus_backend device
  xen: Add xenbus device driver
  xen: Add privcmd device driver
  xen/gntalloc: fix reference counts on multi-page mappings
  ...
-rw-r--r--  Documentation/ABI/stable/sysfs-bus-xen-backend | 75
-rw-r--r--  Documentation/ABI/stable/sysfs-devices-system-xen_memory | 77
-rw-r--r--  arch/ia64/include/asm/xen/interface.h | 2
-rw-r--r--  arch/x86/xen/Kconfig | 4
-rw-r--r--  arch/x86/xen/grant-table.c | 44
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 9
-rw-r--r--  drivers/block/xen-blkfront.c | 11
-rw-r--r--  drivers/input/misc/xen-kbdfront.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 2
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 9
-rw-r--r--  drivers/net/xen-netfront.c | 9
-rw-r--r--  drivers/pci/xen-pcifront.c | 11
-rw-r--r--  drivers/video/xen-fbfront.c | 9
-rw-r--r--  drivers/xen/Kconfig | 7
-rw-r--r--  drivers/xen/Makefile | 2
-rw-r--r--  drivers/xen/events.c | 77
-rw-r--r--  drivers/xen/evtchn.c | 2
-rw-r--r--  drivers/xen/gntalloc.c | 121
-rw-r--r--  drivers/xen/gntdev.c | 34
-rw-r--r--  drivers/xen/grant-table.c | 518
-rw-r--r--  drivers/xen/privcmd.c (renamed from drivers/xen/xenfs/privcmd.c) | 41
-rw-r--r--  drivers/xen/privcmd.h | 3
-rw-r--r--  drivers/xen/xen-pciback/pci_stub.c | 4
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c | 19
-rw-r--r--  drivers/xen/xenbus/Makefile | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 193
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.h | 4
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_backend.c | 90
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c (renamed from drivers/xen/xenfs/xenbus.c) | 40
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 9
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.h | 6
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_backend.c | 8
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 8
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 23
-rw-r--r--  drivers/xen/xenfs/Makefile | 2
-rw-r--r--  drivers/xen/xenfs/super.c | 6
-rw-r--r--  drivers/xen/xenfs/xenfs.h | 2
-rw-r--r--  include/xen/events.h | 7
-rw-r--r--  include/xen/grant_table.h | 37
-rw-r--r--  include/xen/interface/grant_table.h | 167
-rw-r--r--  include/xen/interface/io/xs_wire.h | 3
-rw-r--r--  include/xen/interface/xen.h | 2
-rw-r--r--  include/xen/xenbus.h | 31
-rw-r--r--  include/xen/xenbus_dev.h | 41
44 files changed, 1513 insertions, 265 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
new file mode 100644
index 000000000000..3d5951c8bf5f
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-xen-backend
@@ -0,0 +1,75 @@
1What: /sys/bus/xen-backend/devices/*/devtype
2Date: Feb 2009
3KernelVersion: 2.6.38
4Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
5Description:
6 The type of the device. e.g., one of: 'vbd' (block),
7 'vif' (network), or 'vfb' (framebuffer).
8
9What: /sys/bus/xen-backend/devices/*/nodename
10Date: Feb 2009
11KernelVersion: 2.6.38
12Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
13Description:
14 XenStore node (under /local/domain/NNN/) for this
15 backend device.
16
17What: /sys/bus/xen-backend/devices/vbd-*/physical_device
18Date: April 2011
19KernelVersion: 3.0
20Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
21Description:
22 The major:minor number (in hexidecimal) of the
23 physical device providing the storage for this backend
24 block device.
25
26What: /sys/bus/xen-backend/devices/vbd-*/mode
27Date: April 2011
28KernelVersion: 3.0
29Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
30Description:
31 Whether the block device is read-only ('r') or
32 read-write ('w').
33
34What: /sys/bus/xen-backend/devices/vbd-*/statistics/f_req
35Date: April 2011
36KernelVersion: 3.0
37Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
38Description:
39 Number of flush requests from the frontend.
40
41What: /sys/bus/xen-backend/devices/vbd-*/statistics/oo_req
42Date: April 2011
43KernelVersion: 3.0
44Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
45Description:
46 Number of requests delayed because the backend was too
47 busy processing previous requests.
48
49What: /sys/bus/xen-backend/devices/vbd-*/statistics/rd_req
50Date: April 2011
51KernelVersion: 3.0
52Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
53Description:
54 Number of read requests from the frontend.
55
56What: /sys/bus/xen-backend/devices/vbd-*/statistics/rd_sect
57Date: April 2011
58KernelVersion: 3.0
59Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
60Description:
61 Number of sectors read by the frontend.
62
63What: /sys/bus/xen-backend/devices/vbd-*/statistics/wr_req
64Date: April 2011
65KernelVersion: 3.0
66Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
67Description:
68 Number of write requests from the frontend.
69
70What: /sys/bus/xen-backend/devices/vbd-*/statistics/wr_sect
71Date: April 2011
72KernelVersion: 3.0
73Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
74Description:
75 Number of sectors written by the frontend.
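The statistics above are plain text attributes under sysfs, so they can be sampled from the control domain with ordinary file I/O. A minimal userspace sketch in C follows; the backend name vbd-1-51712 is a made-up example, since real names depend on the guest domain id and virtual device number.

/*
 * Sketch: read one of the vbd backend statistics documented above.
 * The device name in the path is a placeholder.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/xen-backend/devices/vbd-1-51712/statistics/rd_req";
	unsigned long long rd_req;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%llu", &rd_req) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("read requests so far: %llu\n", rd_req);
	return 0;
}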
diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
new file mode 100644
index 000000000000..caa311d59ac1
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -0,0 +1,77 @@
1What: /sys/devices/system/xen_memory/xen_memory0/max_retry_count
2Date: May 2011
3KernelVersion: 2.6.39
4Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
5Description:
6 The maximum number of times the balloon driver will
7 attempt to increase the balloon before giving up. See
8 also 'retry_count' below.
9 A value of zero means retry forever and is the default one.
10
11What: /sys/devices/system/xen_memory/xen_memory0/max_schedule_delay
12Date: May 2011
13KernelVersion: 2.6.39
14Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
15Description:
16 The limit that 'schedule_delay' (see below) will be
17 increased to. The default value is 32 seconds.
18
19What: /sys/devices/system/xen_memory/xen_memory0/retry_count
20Date: May 2011
21KernelVersion: 2.6.39
22Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
23Description:
24 The current number of times that the balloon driver
25 has attempted to increase the size of the balloon.
26 The default value is one. With max_retry_count being
27 zero (unlimited), this means that the driver will attempt
28 to retry with a 'schedule_delay' delay.
29
30What: /sys/devices/system/xen_memory/xen_memory0/schedule_delay
31Date: May 2011
32KernelVersion: 2.6.39
33Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
34Description:
35 The time (in seconds) to wait between attempts to
36 increase the balloon. Each time the balloon cannot be
37 increased, 'schedule_delay' is increased (until
38 'max_schedule_delay' is reached at which point it
39 will use the max value).
40
41What: /sys/devices/system/xen_memory/xen_memory0/target
42Date: April 2008
43KernelVersion: 2.6.26
44Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
45Description:
46 The target number of pages to adjust this domain's
47 memory reservation to.
48
49What: /sys/devices/system/xen_memory/xen_memory0/target_kb
50Date: April 2008
51KernelVersion: 2.6.26
52Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
53Description:
54 As target above, except the value is in KiB.
55
56What: /sys/devices/system/xen_memory/xen_memory0/info/current_kb
57Date: April 2008
58KernelVersion: 2.6.26
59Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
60Description:
61 Current size (in KiB) of this domain's memory
62 reservation.
63
64What: /sys/devices/system/xen_memory/xen_memory0/info/high_kb
65Date: April 2008
66KernelVersion: 2.6.26
67Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
68Description:
69 Amount (in KiB) of high memory in the balloon.
70
71What: /sys/devices/system/xen_memory/xen_memory0/info/low_kb
72Date: April 2008
73KernelVersion: 2.6.26
74Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
75Description:
76 Amount (in KiB) of low (or normal) memory in the
77 balloon.
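Of these attributes, target and target_kb are the knobs a toolstack writes to balloon the domain up or down; the rest are read-only counters and limits. A minimal sketch that asks the balloon driver to aim for 512 MiB by writing target_kb; the 512 MiB figure is an arbitrary example and the write requires root.

/*
 * Sketch: set the balloon target via target_kb (value is in KiB).
 * The chosen size is illustrative only.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/xen_memory/xen_memory0/target_kb",
			"w");

	if (!f) {
		perror("target_kb");
		return 1;
	}
	fprintf(f, "%lu\n", 512UL * 1024);	/* 512 MiB expressed in KiB */
	fclose(f);
	return 0;
}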
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index 1d2427d116e3..fbb519828aa1 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -71,7 +71,7 @@
71__DEFINE_GUEST_HANDLE(uchar, unsigned char); 71__DEFINE_GUEST_HANDLE(uchar, unsigned char);
72__DEFINE_GUEST_HANDLE(uint, unsigned int); 72__DEFINE_GUEST_HANDLE(uint, unsigned int);
73__DEFINE_GUEST_HANDLE(ulong, unsigned long); 73__DEFINE_GUEST_HANDLE(ulong, unsigned long);
74__DEFINE_GUEST_HANDLE(u64, unsigned long); 74
75DEFINE_GUEST_HANDLE(char); 75DEFINE_GUEST_HANDLE(char);
76DEFINE_GUEST_HANDLE(int); 76DEFINE_GUEST_HANDLE(int);
77DEFINE_GUEST_HANDLE(long); 77DEFINE_GUEST_HANDLE(long);
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 26c731a106af..fdce49c7aff6 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -29,7 +29,8 @@ config XEN_PVHVM
29 29
30config XEN_MAX_DOMAIN_MEMORY 30config XEN_MAX_DOMAIN_MEMORY
31 int 31 int
32 default 128 32 default 500 if X86_64
33 default 64 if X86_32
33 depends on XEN 34 depends on XEN
34 help 35 help
35 This only affects the sizing of some bss arrays, the unused 36 This only affects the sizing of some bss arrays, the unused
@@ -48,3 +49,4 @@ config XEN_DEBUG_FS
48 help 49 help
49 Enable statistics output and various tuning options in debugfs. 50 Enable statistics output and various tuning options in debugfs.
50 Enabling this option may incur a significant performance overhead. 51 Enabling this option may incur a significant performance overhead.
52
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 5a40d24ba331..3a5f55d51907 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -54,6 +54,20 @@ static int map_pte_fn(pte_t *pte, struct page *pmd_page,
54 return 0; 54 return 0;
55} 55}
56 56
57/*
58 * This function is used to map shared frames to store grant status. It is
59 * different from map_pte_fn above, the frames type here is uint64_t.
60 */
61static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
62 unsigned long addr, void *data)
63{
64 uint64_t **frames = (uint64_t **)data;
65
66 set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
67 (*frames)++;
68 return 0;
69}
70
57static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, 71static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
58 unsigned long addr, void *data) 72 unsigned long addr, void *data)
59{ 73{
@@ -64,10 +78,10 @@ static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
64 78
65int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 79int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
66 unsigned long max_nr_gframes, 80 unsigned long max_nr_gframes,
67 struct grant_entry **__shared) 81 void **__shared)
68{ 82{
69 int rc; 83 int rc;
70 struct grant_entry *shared = *__shared; 84 void *shared = *__shared;
71 85
72 if (shared == NULL) { 86 if (shared == NULL) {
73 struct vm_struct *area = 87 struct vm_struct *area =
@@ -83,8 +97,30 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
83 return rc; 97 return rc;
84} 98}
85 99
86void arch_gnttab_unmap_shared(struct grant_entry *shared, 100int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
87 unsigned long nr_gframes) 101 unsigned long max_nr_gframes,
102 grant_status_t **__shared)
103{
104 int rc;
105 grant_status_t *shared = *__shared;
106
107 if (shared == NULL) {
108 /* No need to pass in PTE as we are going to do it
109 * in apply_to_page_range anyhow. */
110 struct vm_struct *area =
111 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
112 BUG_ON(area == NULL);
113 shared = area->addr;
114 *__shared = shared;
115 }
116
117 rc = apply_to_page_range(&init_mm, (unsigned long)shared,
118 PAGE_SIZE * nr_gframes,
119 map_pte_fn_status, &frames);
120 return rc;
121}
122
123void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
88{ 124{
89 apply_to_page_range(&init_mm, (unsigned long)shared, 125 apply_to_page_range(&init_mm, (unsigned long)shared,
90 PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); 126 PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8069322e4c9e..37c794d31264 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -787,17 +787,14 @@ static const struct xenbus_device_id xen_blkbk_ids[] = {
787}; 787};
788 788
789 789
790static struct xenbus_driver xen_blkbk = { 790static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
791 .name = "vbd",
792 .owner = THIS_MODULE,
793 .ids = xen_blkbk_ids,
794 .probe = xen_blkbk_probe, 791 .probe = xen_blkbk_probe,
795 .remove = xen_blkbk_remove, 792 .remove = xen_blkbk_remove,
796 .otherend_changed = frontend_changed 793 .otherend_changed = frontend_changed
797}; 794);
798 795
799 796
800int xen_blkif_xenbus_init(void) 797int xen_blkif_xenbus_init(void)
801{ 798{
802 return xenbus_register_backend(&xen_blkbk); 799 return xenbus_register_backend(&xen_blkbk_driver);
803} 800}
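This is the first of many hunks converting drivers to the DEFINE_XENBUS_DRIVER() helper introduced by the "Xen: consolidate and simplify struct xenbus_driver instantiation" commit; the macro itself lands in include/xen/xenbus.h (31 lines in the diffstat) and its hunk is not shown here. Inferred from the call sites and from the .name/.owner/.ids initializers they drop, it has to expand to roughly the following; treat this as a reconstruction, not the real definition. The comment notes the GCC a ?: b extension, which lets an empty name argument fall back to the device type string of the id table.

/*
 * Hypothetical expansion of DEFINE_XENBUS_DRIVER(), reconstructed from the
 * call sites in this merge rather than copied from include/xen/xenbus.h.
 * DEFINE_XENBUS_DRIVER(xen_blkbk, , .probe = ..., ...) must yield a
 * "xen_blkbk_driver" whose .ids points at xen_blkbk_ids, with the name
 * taken from the explicit string (the pcifront case) or, via GCC's ?:
 * extension, from the first entry of the id table when none is given.
 */
#define DEFINE_XENBUS_DRIVER(var, drvname, methods...)			\
	struct xenbus_driver var ## _driver = {				\
		.name = drvname + 0 ?: var ## _ids[0].devicetype,	\
		.owner = THIS_MODULE,					\
		.ids = var ## _ids,					\
		methods							\
	}

The net effect, repeated in every converted driver below, is that only the <prefix>_driver object and its <prefix>_ids table remain as named symbols, which is why xen_blkif_xenbus_init() now registers xen_blkbk_driver instead of the old open-coded xen_blkbk.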
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7b2ec5908413..9fd3ee203b1e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1437,16 +1437,13 @@ static const struct xenbus_device_id blkfront_ids[] = {
1437 { "" } 1437 { "" }
1438}; 1438};
1439 1439
1440static struct xenbus_driver blkfront = { 1440static DEFINE_XENBUS_DRIVER(blkfront, ,
1441 .name = "vbd",
1442 .owner = THIS_MODULE,
1443 .ids = blkfront_ids,
1444 .probe = blkfront_probe, 1441 .probe = blkfront_probe,
1445 .remove = blkfront_remove, 1442 .remove = blkfront_remove,
1446 .resume = blkfront_resume, 1443 .resume = blkfront_resume,
1447 .otherend_changed = blkback_changed, 1444 .otherend_changed = blkback_changed,
1448 .is_ready = blkfront_is_ready, 1445 .is_ready = blkfront_is_ready,
1449}; 1446);
1450 1447
1451static int __init xlblk_init(void) 1448static int __init xlblk_init(void)
1452{ 1449{
@@ -1461,7 +1458,7 @@ static int __init xlblk_init(void)
1461 return -ENODEV; 1458 return -ENODEV;
1462 } 1459 }
1463 1460
1464 ret = xenbus_register_frontend(&blkfront); 1461 ret = xenbus_register_frontend(&blkfront_driver);
1465 if (ret) { 1462 if (ret) {
1466 unregister_blkdev(XENVBD_MAJOR, DEV_NAME); 1463 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
1467 return ret; 1464 return ret;
@@ -1474,7 +1471,7 @@ module_init(xlblk_init);
1474 1471
1475static void __exit xlblk_exit(void) 1472static void __exit xlblk_exit(void)
1476{ 1473{
1477 return xenbus_unregister_driver(&blkfront); 1474 return xenbus_unregister_driver(&blkfront_driver);
1478} 1475}
1479module_exit(xlblk_exit); 1476module_exit(xlblk_exit);
1480 1477
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index ad2e51c04db8..02ca8680ea5b 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -361,15 +361,12 @@ static const struct xenbus_device_id xenkbd_ids[] = {
361 { "" } 361 { "" }
362}; 362};
363 363
364static struct xenbus_driver xenkbd_driver = { 364static DEFINE_XENBUS_DRIVER(xenkbd, ,
365 .name = "vkbd",
366 .owner = THIS_MODULE,
367 .ids = xenkbd_ids,
368 .probe = xenkbd_probe, 365 .probe = xenkbd_probe,
369 .remove = xenkbd_remove, 366 .remove = xenkbd_remove,
370 .resume = xenkbd_resume, 367 .resume = xenkbd_resume,
371 .otherend_changed = xenkbd_backend_changed, 368 .otherend_changed = xenkbd_backend_changed,
372}; 369);
373 370
374static int __init xenkbd_init(void) 371static int __init xenkbd_init(void)
375{ 372{
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 639cf8ab62ba..59effac15f36 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1634,7 +1634,7 @@ static int __init netback_init(void)
1634 int rc = 0; 1634 int rc = 0;
1635 int group; 1635 int group;
1636 1636
1637 if (!xen_pv_domain()) 1637 if (!xen_domain())
1638 return -ENODEV; 1638 return -ENODEV;
1639 1639
1640 xen_netbk_group_nr = num_online_cpus(); 1640 xen_netbk_group_nr = num_online_cpus();
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1ce729d6af75..410018c4c528 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -474,17 +474,14 @@ static const struct xenbus_device_id netback_ids[] = {
474}; 474};
475 475
476 476
477static struct xenbus_driver netback = { 477static DEFINE_XENBUS_DRIVER(netback, ,
478 .name = "vif",
479 .owner = THIS_MODULE,
480 .ids = netback_ids,
481 .probe = netback_probe, 478 .probe = netback_probe,
482 .remove = netback_remove, 479 .remove = netback_remove,
483 .uevent = netback_uevent, 480 .uevent = netback_uevent,
484 .otherend_changed = frontend_changed, 481 .otherend_changed = frontend_changed,
485}; 482);
486 483
487int xenvif_xenbus_init(void) 484int xenvif_xenbus_init(void)
488{ 485{
489 return xenbus_register_backend(&netback); 486 return xenbus_register_backend(&netback_driver);
490} 487}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0a59c57864f5..fa679057630f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1914,7 +1914,7 @@ static void xennet_sysfs_delif(struct net_device *netdev)
1914 1914
1915#endif /* CONFIG_SYSFS */ 1915#endif /* CONFIG_SYSFS */
1916 1916
1917static struct xenbus_device_id netfront_ids[] = { 1917static const struct xenbus_device_id netfront_ids[] = {
1918 { "vif" }, 1918 { "vif" },
1919 { "" } 1919 { "" }
1920}; 1920};
@@ -1941,15 +1941,12 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
1941 return 0; 1941 return 0;
1942} 1942}
1943 1943
1944static struct xenbus_driver netfront_driver = { 1944static DEFINE_XENBUS_DRIVER(netfront, ,
1945 .name = "vif",
1946 .owner = THIS_MODULE,
1947 .ids = netfront_ids,
1948 .probe = netfront_probe, 1945 .probe = netfront_probe,
1949 .remove = __devexit_p(xennet_remove), 1946 .remove = __devexit_p(xennet_remove),
1950 .resume = netfront_resume, 1947 .resume = netfront_resume,
1951 .otherend_changed = netback_changed, 1948 .otherend_changed = netback_changed,
1952}; 1949);
1953 1950
1954static int __init netif_init(void) 1951static int __init netif_init(void)
1955{ 1952{
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 90832a955991..7cf3d2fcf56a 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1126,14 +1126,11 @@ static const struct xenbus_device_id xenpci_ids[] = {
1126 {""}, 1126 {""},
1127}; 1127};
1128 1128
1129static struct xenbus_driver xenbus_pcifront_driver = { 1129static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
1130 .name = "pcifront",
1131 .owner = THIS_MODULE,
1132 .ids = xenpci_ids,
1133 .probe = pcifront_xenbus_probe, 1130 .probe = pcifront_xenbus_probe,
1134 .remove = pcifront_xenbus_remove, 1131 .remove = pcifront_xenbus_remove,
1135 .otherend_changed = pcifront_backend_changed, 1132 .otherend_changed = pcifront_backend_changed,
1136}; 1133);
1137 1134
1138static int __init pcifront_init(void) 1135static int __init pcifront_init(void)
1139{ 1136{
@@ -1142,12 +1139,12 @@ static int __init pcifront_init(void)
1142 1139
1143 pci_frontend_registrar(1 /* enable */); 1140 pci_frontend_registrar(1 /* enable */);
1144 1141
1145 return xenbus_register_frontend(&xenbus_pcifront_driver); 1142 return xenbus_register_frontend(&xenpci_driver);
1146} 1143}
1147 1144
1148static void __exit pcifront_cleanup(void) 1145static void __exit pcifront_cleanup(void)
1149{ 1146{
1150 xenbus_unregister_driver(&xenbus_pcifront_driver); 1147 xenbus_unregister_driver(&xenpci_driver);
1151 pci_frontend_registrar(0 /* disable */); 1148 pci_frontend_registrar(0 /* disable */);
1152} 1149}
1153module_init(pcifront_init); 1150module_init(pcifront_init);
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index beac52fc1c0e..cb4529c40d74 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -671,20 +671,17 @@ InitWait:
671 } 671 }
672} 672}
673 673
674static struct xenbus_device_id xenfb_ids[] = { 674static const struct xenbus_device_id xenfb_ids[] = {
675 { "vfb" }, 675 { "vfb" },
676 { "" } 676 { "" }
677}; 677};
678 678
679static struct xenbus_driver xenfb_driver = { 679static DEFINE_XENBUS_DRIVER(xenfb, ,
680 .name = "vfb",
681 .owner = THIS_MODULE,
682 .ids = xenfb_ids,
683 .probe = xenfb_probe, 680 .probe = xenfb_probe,
684 .remove = xenfb_remove, 681 .remove = xenfb_remove,
685 .resume = xenfb_resume, 682 .resume = xenfb_resume,
686 .otherend_changed = xenfb_backend_changed, 683 .otherend_changed = xenfb_backend_changed,
687}; 684);
688 685
689static int __init xenfb_init(void) 686static int __init xenfb_init(void)
690{ 687{
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8795480c2350..a1ced521cf74 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -86,6 +86,7 @@ config XEN_BACKEND
86 86
87config XENFS 87config XENFS
88 tristate "Xen filesystem" 88 tristate "Xen filesystem"
89 select XEN_PRIVCMD
89 default y 90 default y
90 help 91 help
91 The xen filesystem provides a way for domains to share 92 The xen filesystem provides a way for domains to share
@@ -171,4 +172,10 @@ config XEN_PCIDEV_BACKEND
171 xen-pciback.hide=(03:00.0)(04:00.0) 172 xen-pciback.hide=(03:00.0)(04:00.0)
172 173
173 If in doubt, say m. 174 If in doubt, say m.
175
176config XEN_PRIVCMD
177 tristate
178 depends on XEN
179 default m
180
174endmenu 181endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 974fffdf22b2..aa31337192cc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_XEN_TMEM) += tmem.o
19obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o 19obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
20obj-$(CONFIG_XEN_DOM0) += pci.o 20obj-$(CONFIG_XEN_DOM0) += pci.o
21obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ 21obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
22obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
22 23
23xen-evtchn-y := evtchn.o 24xen-evtchn-y := evtchn.o
24xen-gntdev-y := gntdev.o 25xen-gntdev-y := gntdev.o
25xen-gntalloc-y := gntalloc.o 26xen-gntalloc-y := gntalloc.o
27xen-privcmd-y := privcmd.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6e075cdd0c6b..e5e5812a1014 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -87,6 +87,7 @@ enum xen_irq_type {
87 */ 87 */
88struct irq_info { 88struct irq_info {
89 struct list_head list; 89 struct list_head list;
90 int refcnt;
90 enum xen_irq_type type; /* type */ 91 enum xen_irq_type type; /* type */
91 unsigned irq; 92 unsigned irq;
92 unsigned short evtchn; /* event channel */ 93 unsigned short evtchn; /* event channel */
@@ -406,6 +407,7 @@ static void xen_irq_init(unsigned irq)
406 panic("Unable to allocate metadata for IRQ%d\n", irq); 407 panic("Unable to allocate metadata for IRQ%d\n", irq);
407 408
408 info->type = IRQT_UNBOUND; 409 info->type = IRQT_UNBOUND;
410 info->refcnt = -1;
409 411
410 irq_set_handler_data(irq, info); 412 irq_set_handler_data(irq, info);
411 413
@@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
469 471
470 irq_set_handler_data(irq, NULL); 472 irq_set_handler_data(irq, NULL);
471 473
474 WARN_ON(info->refcnt > 0);
475
472 kfree(info); 476 kfree(info);
473 477
474 /* Legacy IRQ descriptors are managed by the arch. */ 478 /* Legacy IRQ descriptors are managed by the arch. */
@@ -637,7 +641,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
637 if (irq != -1) { 641 if (irq != -1) {
638 printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", 642 printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
639 irq, gsi); 643 irq, gsi);
640 goto out; /* XXX need refcount? */ 644 goto out;
641 } 645 }
642 646
643 irq = xen_allocate_irq_gsi(gsi); 647 irq = xen_allocate_irq_gsi(gsi);
@@ -939,9 +943,16 @@ static void unbind_from_irq(unsigned int irq)
939{ 943{
940 struct evtchn_close close; 944 struct evtchn_close close;
941 int evtchn = evtchn_from_irq(irq); 945 int evtchn = evtchn_from_irq(irq);
946 struct irq_info *info = irq_get_handler_data(irq);
942 947
943 mutex_lock(&irq_mapping_update_lock); 948 mutex_lock(&irq_mapping_update_lock);
944 949
950 if (info->refcnt > 0) {
951 info->refcnt--;
952 if (info->refcnt != 0)
953 goto done;
954 }
955
945 if (VALID_EVTCHN(evtchn)) { 956 if (VALID_EVTCHN(evtchn)) {
946 close.port = evtchn; 957 close.port = evtchn;
947 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 958 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
@@ -970,6 +981,7 @@ static void unbind_from_irq(unsigned int irq)
970 981
971 xen_free_irq(irq); 982 xen_free_irq(irq);
972 983
984 done:
973 mutex_unlock(&irq_mapping_update_lock); 985 mutex_unlock(&irq_mapping_update_lock);
974} 986}
975 987
@@ -1065,6 +1077,69 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1065} 1077}
1066EXPORT_SYMBOL_GPL(unbind_from_irqhandler); 1078EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1067 1079
1080int evtchn_make_refcounted(unsigned int evtchn)
1081{
1082 int irq = evtchn_to_irq[evtchn];
1083 struct irq_info *info;
1084
1085 if (irq == -1)
1086 return -ENOENT;
1087
1088 info = irq_get_handler_data(irq);
1089
1090 if (!info)
1091 return -ENOENT;
1092
1093 WARN_ON(info->refcnt != -1);
1094
1095 info->refcnt = 1;
1096
1097 return 0;
1098}
1099EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1100
1101int evtchn_get(unsigned int evtchn)
1102{
1103 int irq;
1104 struct irq_info *info;
1105 int err = -ENOENT;
1106
1107 if (evtchn >= NR_EVENT_CHANNELS)
1108 return -EINVAL;
1109
1110 mutex_lock(&irq_mapping_update_lock);
1111
1112 irq = evtchn_to_irq[evtchn];
1113 if (irq == -1)
1114 goto done;
1115
1116 info = irq_get_handler_data(irq);
1117
1118 if (!info)
1119 goto done;
1120
1121 err = -EINVAL;
1122 if (info->refcnt <= 0)
1123 goto done;
1124
1125 info->refcnt++;
1126 err = 0;
1127 done:
1128 mutex_unlock(&irq_mapping_update_lock);
1129
1130 return err;
1131}
1132EXPORT_SYMBOL_GPL(evtchn_get);
1133
1134void evtchn_put(unsigned int evtchn)
1135{
1136 int irq = evtchn_to_irq[evtchn];
1137 if (WARN_ON(irq == -1))
1138 return;
1139 unbind_from_irq(irq);
1140}
1141EXPORT_SYMBOL_GPL(evtchn_put);
1142
1068void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) 1143void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1069{ 1144{
1070 int irq = per_cpu(ipi_to_irq, cpu)[vector]; 1145 int irq = per_cpu(ipi_to_irq, cpu)[vector];
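The three exported helpers added above layer a simple reference count on top of the existing irq_info. A rough usage sketch in the spirit of the evtchn.c and gntdev.c changes later in this merge; the demo_* names are placeholders, not kernel code.

/*
 * Sketch only: how a driver might use the new event-channel refcounting,
 * modelled on the evtchn.c and gntdev.c hunks in this series.
 */
#include <linux/interrupt.h>
#include <xen/events.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_bind(unsigned int port)
{
	int irq = bind_evtchn_to_irqhandler(port, demo_interrupt, 0,
					    "demo", NULL);
	if (irq < 0)
		return irq;

	/* Flip the channel from refcnt == -1 (unmanaged) to refcnt == 1. */
	return evtchn_make_refcounted(port);
}

static void demo_notify_later(unsigned int port)
{
	/* Pin the channel so it cannot be closed before the notify fires. */
	if (evtchn_get(port))
		return;
	notify_remote_via_evtchn(port);
	evtchn_put(port);	/* falls through to unbind_from_irq() on last ref */
}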
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index dbc13e94b612..b1f60a0c0bea 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -268,7 +268,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
268 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, 268 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
269 u->name, (void *)(unsigned long)port); 269 u->name, (void *)(unsigned long)port);
270 if (rc >= 0) 270 if (rc >= 0)
271 rc = 0; 271 rc = evtchn_make_refcounted(port);
272 272
273 return rc; 273 return rc;
274} 274}
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e1c4c6e5b469..934985d14c24 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
74 "the gntalloc device"); 74 "the gntalloc device");
75 75
76static LIST_HEAD(gref_list); 76static LIST_HEAD(gref_list);
77static DEFINE_SPINLOCK(gref_lock); 77static DEFINE_MUTEX(gref_mutex);
78static int gref_size; 78static int gref_size;
79 79
80struct notify_info { 80struct notify_info {
@@ -99,6 +99,12 @@ struct gntalloc_file_private_data {
99 uint64_t index; 99 uint64_t index;
100}; 100};
101 101
102struct gntalloc_vma_private_data {
103 struct gntalloc_gref *gref;
104 int users;
105 int count;
106};
107
102static void __del_gref(struct gntalloc_gref *gref); 108static void __del_gref(struct gntalloc_gref *gref);
103 109
104static void do_cleanup(void) 110static void do_cleanup(void)
@@ -143,15 +149,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
143 } 149 }
144 150
145 /* Add to gref lists. */ 151 /* Add to gref lists. */
146 spin_lock(&gref_lock); 152 mutex_lock(&gref_mutex);
147 list_splice_tail(&queue_gref, &gref_list); 153 list_splice_tail(&queue_gref, &gref_list);
148 list_splice_tail(&queue_file, &priv->list); 154 list_splice_tail(&queue_file, &priv->list);
149 spin_unlock(&gref_lock); 155 mutex_unlock(&gref_mutex);
150 156
151 return 0; 157 return 0;
152 158
153undo: 159undo:
154 spin_lock(&gref_lock); 160 mutex_lock(&gref_mutex);
155 gref_size -= (op->count - i); 161 gref_size -= (op->count - i);
156 162
157 list_for_each_entry(gref, &queue_file, next_file) { 163 list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +173,7 @@ undo:
167 */ 173 */
168 if (unlikely(!list_empty(&queue_gref))) 174 if (unlikely(!list_empty(&queue_gref)))
169 list_splice_tail(&queue_gref, &gref_list); 175 list_splice_tail(&queue_gref, &gref_list);
170 spin_unlock(&gref_lock); 176 mutex_unlock(&gref_mutex);
171 return rc; 177 return rc;
172} 178}
173 179
@@ -178,8 +184,10 @@ static void __del_gref(struct gntalloc_gref *gref)
178 tmp[gref->notify.pgoff] = 0; 184 tmp[gref->notify.pgoff] = 0;
179 kunmap(gref->page); 185 kunmap(gref->page);
180 } 186 }
181 if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) 187 if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
182 notify_remote_via_evtchn(gref->notify.event); 188 notify_remote_via_evtchn(gref->notify.event);
189 evtchn_put(gref->notify.event);
190 }
183 191
184 gref->notify.flags = 0; 192 gref->notify.flags = 0;
185 193
@@ -189,6 +197,8 @@ static void __del_gref(struct gntalloc_gref *gref)
189 197
190 if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) 198 if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
191 return; 199 return;
200
201 gnttab_free_grant_reference(gref->gref_id);
192 } 202 }
193 203
194 gref_size--; 204 gref_size--;
@@ -251,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
251 261
252 pr_debug("%s: priv %p\n", __func__, priv); 262 pr_debug("%s: priv %p\n", __func__, priv);
253 263
254 spin_lock(&gref_lock); 264 mutex_lock(&gref_mutex);
255 while (!list_empty(&priv->list)) { 265 while (!list_empty(&priv->list)) {
256 gref = list_entry(priv->list.next, 266 gref = list_entry(priv->list.next,
257 struct gntalloc_gref, next_file); 267 struct gntalloc_gref, next_file);
@@ -261,7 +271,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
261 __del_gref(gref); 271 __del_gref(gref);
262 } 272 }
263 kfree(priv); 273 kfree(priv);
264 spin_unlock(&gref_lock); 274 mutex_unlock(&gref_mutex);
265 275
266 return 0; 276 return 0;
267} 277}
@@ -286,21 +296,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
286 goto out; 296 goto out;
287 } 297 }
288 298
289 spin_lock(&gref_lock); 299 mutex_lock(&gref_mutex);
290 /* Clean up pages that were at zero (local) users but were still mapped 300 /* Clean up pages that were at zero (local) users but were still mapped
291 * by remote domains. Since those pages count towards the limit that we 301 * by remote domains. Since those pages count towards the limit that we
292 * are about to enforce, removing them here is a good idea. 302 * are about to enforce, removing them here is a good idea.
293 */ 303 */
294 do_cleanup(); 304 do_cleanup();
295 if (gref_size + op.count > limit) { 305 if (gref_size + op.count > limit) {
296 spin_unlock(&gref_lock); 306 mutex_unlock(&gref_mutex);
297 rc = -ENOSPC; 307 rc = -ENOSPC;
298 goto out_free; 308 goto out_free;
299 } 309 }
300 gref_size += op.count; 310 gref_size += op.count;
301 op.index = priv->index; 311 op.index = priv->index;
302 priv->index += op.count * PAGE_SIZE; 312 priv->index += op.count * PAGE_SIZE;
303 spin_unlock(&gref_lock); 313 mutex_unlock(&gref_mutex);
304 314
305 rc = add_grefs(&op, gref_ids, priv); 315 rc = add_grefs(&op, gref_ids, priv);
306 if (rc < 0) 316 if (rc < 0)
@@ -343,7 +353,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
343 goto dealloc_grant_out; 353 goto dealloc_grant_out;
344 } 354 }
345 355
346 spin_lock(&gref_lock); 356 mutex_lock(&gref_mutex);
347 gref = find_grefs(priv, op.index, op.count); 357 gref = find_grefs(priv, op.index, op.count);
348 if (gref) { 358 if (gref) {
349 /* Remove from the file list only, and decrease reference count. 359 /* Remove from the file list only, and decrease reference count.
@@ -363,7 +373,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
363 373
364 do_cleanup(); 374 do_cleanup();
365 375
366 spin_unlock(&gref_lock); 376 mutex_unlock(&gref_mutex);
367dealloc_grant_out: 377dealloc_grant_out:
368 return rc; 378 return rc;
369} 379}
@@ -383,7 +393,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
383 index = op.index & ~(PAGE_SIZE - 1); 393 index = op.index & ~(PAGE_SIZE - 1);
384 pgoff = op.index & (PAGE_SIZE - 1); 394 pgoff = op.index & (PAGE_SIZE - 1);
385 395
386 spin_lock(&gref_lock); 396 mutex_lock(&gref_mutex);
387 397
388 gref = find_grefs(priv, index, 1); 398 gref = find_grefs(priv, index, 1);
389 if (!gref) { 399 if (!gref) {
@@ -396,12 +406,30 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
396 goto unlock_out; 406 goto unlock_out;
397 } 407 }
398 408
409 /* We need to grab a reference to the event channel we are going to use
410 * to send the notify before releasing the reference we may already have
411 * (if someone has called this ioctl twice). This is required so that
412 * it is possible to change the clear_byte part of the notification
413 * without disturbing the event channel part, which may now be the last
414 * reference to that event channel.
415 */
416 if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
417 if (evtchn_get(op.event_channel_port)) {
418 rc = -EINVAL;
419 goto unlock_out;
420 }
421 }
422
423 if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
424 evtchn_put(gref->notify.event);
425
399 gref->notify.flags = op.action; 426 gref->notify.flags = op.action;
400 gref->notify.pgoff = pgoff; 427 gref->notify.pgoff = pgoff;
401 gref->notify.event = op.event_channel_port; 428 gref->notify.event = op.event_channel_port;
402 rc = 0; 429 rc = 0;
430
403 unlock_out: 431 unlock_out:
404 spin_unlock(&gref_lock); 432 mutex_unlock(&gref_mutex);
405 return rc; 433 return rc;
406} 434}
407 435
@@ -429,26 +457,40 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
429 457
430static void gntalloc_vma_open(struct vm_area_struct *vma) 458static void gntalloc_vma_open(struct vm_area_struct *vma)
431{ 459{
432 struct gntalloc_gref *gref = vma->vm_private_data; 460 struct gntalloc_vma_private_data *priv = vma->vm_private_data;
433 if (!gref) 461
462 if (!priv)
434 return; 463 return;
435 464
436 spin_lock(&gref_lock); 465 mutex_lock(&gref_mutex);
437 gref->users++; 466 priv->users++;
438 spin_unlock(&gref_lock); 467 mutex_unlock(&gref_mutex);
439} 468}
440 469
441static void gntalloc_vma_close(struct vm_area_struct *vma) 470static void gntalloc_vma_close(struct vm_area_struct *vma)
442{ 471{
443 struct gntalloc_gref *gref = vma->vm_private_data; 472 struct gntalloc_vma_private_data *priv = vma->vm_private_data;
444 if (!gref) 473 struct gntalloc_gref *gref, *next;
474 int i;
475
476 if (!priv)
445 return; 477 return;
446 478
447 spin_lock(&gref_lock); 479 mutex_lock(&gref_mutex);
448 gref->users--; 480 priv->users--;
449 if (gref->users == 0) 481 if (priv->users == 0) {
450 __del_gref(gref); 482 gref = priv->gref;
451 spin_unlock(&gref_lock); 483 for (i = 0; i < priv->count; i++) {
484 gref->users--;
485 next = list_entry(gref->next_gref.next,
486 struct gntalloc_gref, next_gref);
487 if (gref->users == 0)
488 __del_gref(gref);
489 gref = next;
490 }
491 kfree(priv);
492 }
493 mutex_unlock(&gref_mutex);
452} 494}
453 495
454static struct vm_operations_struct gntalloc_vmops = { 496static struct vm_operations_struct gntalloc_vmops = {
@@ -459,30 +501,41 @@ static struct vm_operations_struct gntalloc_vmops = {
459static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) 501static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
460{ 502{
461 struct gntalloc_file_private_data *priv = filp->private_data; 503 struct gntalloc_file_private_data *priv = filp->private_data;
504 struct gntalloc_vma_private_data *vm_priv;
462 struct gntalloc_gref *gref; 505 struct gntalloc_gref *gref;
463 int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 506 int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
464 int rv, i; 507 int rv, i;
465 508
466 pr_debug("%s: priv %p, page %lu+%d\n", __func__,
467 priv, vma->vm_pgoff, count);
468
469 if (!(vma->vm_flags & VM_SHARED)) { 509 if (!(vma->vm_flags & VM_SHARED)) {
470 printk(KERN_ERR "%s: Mapping must be shared.\n", __func__); 510 printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
471 return -EINVAL; 511 return -EINVAL;
472 } 512 }
473 513
474 spin_lock(&gref_lock); 514 vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
515 if (!vm_priv)
516 return -ENOMEM;
517
518 mutex_lock(&gref_mutex);
519
520 pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
521 priv, vm_priv, vma->vm_pgoff, count);
522
475 gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count); 523 gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
476 if (gref == NULL) { 524 if (gref == NULL) {
477 rv = -ENOENT; 525 rv = -ENOENT;
478 pr_debug("%s: Could not find grant reference", 526 pr_debug("%s: Could not find grant reference",
479 __func__); 527 __func__);
528 kfree(vm_priv);
480 goto out_unlock; 529 goto out_unlock;
481 } 530 }
482 531
483 vma->vm_private_data = gref; 532 vm_priv->gref = gref;
533 vm_priv->users = 1;
534 vm_priv->count = count;
535
536 vma->vm_private_data = vm_priv;
484 537
485 vma->vm_flags |= VM_RESERVED; 538 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
486 539
487 vma->vm_ops = &gntalloc_vmops; 540 vma->vm_ops = &gntalloc_vmops;
488 541
@@ -499,7 +552,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
499 rv = 0; 552 rv = 0;
500 553
501out_unlock: 554out_unlock:
502 spin_unlock(&gref_lock); 555 mutex_unlock(&gref_mutex);
503 return rv; 556 return rv;
504} 557}
505 558
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index afca14d9042e..99d8151c824a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,8 +193,10 @@ static void gntdev_put_map(struct grant_map *map)
193 193
194 atomic_sub(map->count, &pages_mapped); 194 atomic_sub(map->count, &pages_mapped);
195 195
196 if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) 196 if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
197 notify_remote_via_evtchn(map->notify.event); 197 notify_remote_via_evtchn(map->notify.event);
198 evtchn_put(map->notify.event);
199 }
198 200
199 if (map->pages) { 201 if (map->pages) {
200 if (!use_ptemod) 202 if (!use_ptemod)
@@ -312,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
312 } 314 }
313 } 315 }
314 316
315 err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages); 317 err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
318 pages, true);
316 if (err) 319 if (err)
317 return err; 320 return err;
318 321
@@ -599,6 +602,8 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
599 struct ioctl_gntdev_unmap_notify op; 602 struct ioctl_gntdev_unmap_notify op;
600 struct grant_map *map; 603 struct grant_map *map;
601 int rc; 604 int rc;
605 int out_flags;
606 unsigned int out_event;
602 607
603 if (copy_from_user(&op, u, sizeof(op))) 608 if (copy_from_user(&op, u, sizeof(op)))
604 return -EFAULT; 609 return -EFAULT;
@@ -606,6 +611,21 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
606 if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) 611 if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
607 return -EINVAL; 612 return -EINVAL;
608 613
614 /* We need to grab a reference to the event channel we are going to use
615 * to send the notify before releasing the reference we may already have
616 * (if someone has called this ioctl twice). This is required so that
617 * it is possible to change the clear_byte part of the notification
618 * without disturbing the event channel part, which may now be the last
619 * reference to that event channel.
620 */
621 if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
622 if (evtchn_get(op.event_channel_port))
623 return -EINVAL;
624 }
625
626 out_flags = op.action;
627 out_event = op.event_channel_port;
628
609 spin_lock(&priv->lock); 629 spin_lock(&priv->lock);
610 630
611 list_for_each_entry(map, &priv->maps, next) { 631 list_for_each_entry(map, &priv->maps, next) {
@@ -624,12 +644,22 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
624 goto unlock_out; 644 goto unlock_out;
625 } 645 }
626 646
647 out_flags = map->notify.flags;
648 out_event = map->notify.event;
649
627 map->notify.flags = op.action; 650 map->notify.flags = op.action;
628 map->notify.addr = op.index - (map->index << PAGE_SHIFT); 651 map->notify.addr = op.index - (map->index << PAGE_SHIFT);
629 map->notify.event = op.event_channel_port; 652 map->notify.event = op.event_channel_port;
653
630 rc = 0; 654 rc = 0;
655
631 unlock_out: 656 unlock_out:
632 spin_unlock(&priv->lock); 657 spin_unlock(&priv->lock);
658
659 /* Drop the reference to the event channel we did not save in the map */
660 if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
661 evtchn_put(out_event);
662
633 return rc; 663 return rc;
634} 664}
635 665
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bf1c094f4ebf..1cd94daa71db 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -44,16 +44,19 @@
44#include <xen/page.h> 44#include <xen/page.h>
45#include <xen/grant_table.h> 45#include <xen/grant_table.h>
46#include <xen/interface/memory.h> 46#include <xen/interface/memory.h>
47#include <xen/hvc-console.h>
47#include <asm/xen/hypercall.h> 48#include <asm/xen/hypercall.h>
48 49
49#include <asm/pgtable.h> 50#include <asm/pgtable.h>
50#include <asm/sync_bitops.h> 51#include <asm/sync_bitops.h>
51 52
52
53/* External tools reserve first few grant table entries. */ 53/* External tools reserve first few grant table entries. */
54#define NR_RESERVED_ENTRIES 8 54#define NR_RESERVED_ENTRIES 8
55#define GNTTAB_LIST_END 0xffffffff 55#define GNTTAB_LIST_END 0xffffffff
56#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry)) 56#define GREFS_PER_GRANT_FRAME \
57(grant_table_version == 1 ? \
58(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
59(PAGE_SIZE / sizeof(union grant_entry_v2)))
57 60
58static grant_ref_t **gnttab_list; 61static grant_ref_t **gnttab_list;
59static unsigned int nr_grant_frames; 62static unsigned int nr_grant_frames;
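GREFS_PER_GRANT_FRAME is now evaluated per grant-table version because v1 and v2 entries differ in size. A back-of-the-envelope illustration follows, assuming 4 KiB pages, 8-byte v1 entries, 16-byte v2 entries and 2-byte status entries; these sizes are stated for the sake of the example, not quoted from this patch.

/*
 * Illustration of the arithmetic behind the version-dependent
 * GREFS_PER_GRANT_FRAME and the SPP define further down.
 * Entry sizes are assumptions made for this example.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 4096;
	const unsigned int v1_entry = 8;	/* struct grant_entry_v1 */
	const unsigned int v2_entry = 16;	/* union grant_entry_v2 */
	const unsigned int status_entry = 2;	/* grant_status_t */

	printf("v1 grants per frame: %u\n", page_size / v1_entry);	/* 512 */
	printf("v2 grants per frame: %u\n", page_size / v2_entry);	/* 256 */
	printf("status slots per frame: %u\n",
	       page_size / status_entry);				/* 2048 */
	return 0;
}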
@@ -64,13 +67,97 @@ static DEFINE_SPINLOCK(gnttab_list_lock);
64unsigned long xen_hvm_resume_frames; 67unsigned long xen_hvm_resume_frames;
65EXPORT_SYMBOL_GPL(xen_hvm_resume_frames); 68EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
66 69
67static struct grant_entry *shared; 70static union {
71 struct grant_entry_v1 *v1;
72 union grant_entry_v2 *v2;
73 void *addr;
74} gnttab_shared;
75
76/*This is a structure of function pointers for grant table*/
77struct gnttab_ops {
78 /*
79 * Mapping a list of frames for storing grant entries. Frames parameter
80 * is used to store grant table address when grant table being setup,
81 * nr_gframes is the number of frames to map grant table. Returning
82 * GNTST_okay means success and negative value means failure.
83 */
84 int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
85 /*
86 * Release a list of frames which are mapped in map_frames for grant
87 * entry status.
88 */
89 void (*unmap_frames)(void);
90 /*
91 * Introducing a valid entry into the grant table, granting the frame of
92 * this grant entry to domain for accessing or transfering. Ref
93 * parameter is reference of this introduced grant entry, domid is id of
94 * granted domain, frame is the page frame to be granted, and flags is
95 * status of the grant entry to be updated.
96 */
97 void (*update_entry)(grant_ref_t ref, domid_t domid,
98 unsigned long frame, unsigned flags);
99 /*
100 * Stop granting a grant entry to domain for accessing. Ref parameter is
101 * reference of a grant entry whose grant access will be stopped,
102 * readonly is not in use in this function. If the grant entry is
103 * currently mapped for reading or writing, just return failure(==0)
104 * directly and don't tear down the grant access. Otherwise, stop grant
105 * access for this entry and return success(==1).
106 */
107 int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
108 /*
109 * Stop granting a grant entry to domain for transfer. Ref parameter is
110 * reference of a grant entry whose grant transfer will be stopped. If
111 * tranfer has not started, just reclaim the grant entry and return
112 * failure(==0). Otherwise, wait for the transfer to complete and then
113 * return the frame.
114 */
115 unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
116 /*
117 * Query the status of a grant entry. Ref parameter is reference of
118 * queried grant entry, return value is the status of queried entry.
119 * Detailed status(writing/reading) can be gotten from the return value
120 * by bit operations.
121 */
122 int (*query_foreign_access)(grant_ref_t ref);
123 /*
124 * Grant a domain to access a range of bytes within the page referred by
125 * an available grant entry. Ref parameter is reference of a grant entry
126 * which will be sub-page accessed, domid is id of grantee domain, frame
127 * is frame address of subpage grant, flags is grant type and flag
128 * information, page_off is offset of the range of bytes, and length is
129 * length of bytes to be accessed.
130 */
131 void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
132 unsigned long frame, int flags,
133 unsigned page_off, unsigned length);
134 /*
135 * Redirect an available grant entry on domain A to another grant
136 * reference of domain B, then allow domain C to use grant reference
137 * of domain B transitively. Ref parameter is an available grant entry
138 * reference on domain A, domid is id of domain C which accesses grant
139 * entry transitively, flags is grant type and flag information,
140 * trans_domid is id of domain B whose grant entry is finally accessed
141 * transitively, trans_gref is grant entry transitive reference of
142 * domain B.
143 */
144 void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
145 domid_t trans_domid, grant_ref_t trans_gref);
146};
147
148static struct gnttab_ops *gnttab_interface;
149
150/*This reflects status of grant entries, so act as a global value*/
151static grant_status_t *grstatus;
152
153static int grant_table_version;
68 154
69static struct gnttab_free_callback *gnttab_free_callback_list; 155static struct gnttab_free_callback *gnttab_free_callback_list;
70 156
71static int gnttab_expand(unsigned int req_entries); 157static int gnttab_expand(unsigned int req_entries);
72 158
73#define RPP (PAGE_SIZE / sizeof(grant_ref_t)) 159#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
160#define SPP (PAGE_SIZE / sizeof(grant_status_t))
74 161
75static inline grant_ref_t *__gnttab_entry(grant_ref_t entry) 162static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
76{ 163{
@@ -142,23 +229,33 @@ static void put_free_entry(grant_ref_t ref)
142 spin_unlock_irqrestore(&gnttab_list_lock, flags); 229 spin_unlock_irqrestore(&gnttab_list_lock, flags);
143} 230}
144 231
145static void update_grant_entry(grant_ref_t ref, domid_t domid, 232/*
146 unsigned long frame, unsigned flags) 233 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
234 * Introducing a valid entry into the grant table:
235 * 1. Write ent->domid.
236 * 2. Write ent->frame:
237 * GTF_permit_access: Frame to which access is permitted.
238 * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
239 * frame, or zero if none.
240 * 3. Write memory barrier (WMB).
241 * 4. Write ent->flags, inc. valid type.
242 */
243static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
244 unsigned long frame, unsigned flags)
245{
246 gnttab_shared.v1[ref].domid = domid;
247 gnttab_shared.v1[ref].frame = frame;
248 wmb();
249 gnttab_shared.v1[ref].flags = flags;
250}
251
252static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
253 unsigned long frame, unsigned flags)
147{ 254{
148 /* 255 gnttab_shared.v2[ref].hdr.domid = domid;
149 * Introducing a valid entry into the grant table: 256 gnttab_shared.v2[ref].full_page.frame = frame;
150 * 1. Write ent->domid.
151 * 2. Write ent->frame:
152 * GTF_permit_access: Frame to which access is permitted.
153 * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
154 * frame, or zero if none.
155 * 3. Write memory barrier (WMB).
156 * 4. Write ent->flags, inc. valid type.
157 */
158 shared[ref].frame = frame;
159 shared[ref].domid = domid;
160 wmb(); 257 wmb();
161 shared[ref].flags = flags; 258 gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
162} 259}
163 260
164/* 261/*
@@ -167,7 +264,7 @@ static void update_grant_entry(grant_ref_t ref, domid_t domid,
167void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, 264void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
168 unsigned long frame, int readonly) 265 unsigned long frame, int readonly)
169{ 266{
170 update_grant_entry(ref, domid, frame, 267 gnttab_interface->update_entry(ref, domid, frame,
171 GTF_permit_access | (readonly ? GTF_readonly : 0)); 268 GTF_permit_access | (readonly ? GTF_readonly : 0));
172} 269}
173EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); 270EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
@@ -187,31 +284,184 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
187} 284}
188EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); 285EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
189 286
190int gnttab_query_foreign_access(grant_ref_t ref) 287void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
288 unsigned long frame, int flags,
289 unsigned page_off,
290 unsigned length)
291{
292 gnttab_shared.v2[ref].sub_page.frame = frame;
293 gnttab_shared.v2[ref].sub_page.page_off = page_off;
294 gnttab_shared.v2[ref].sub_page.length = length;
295 gnttab_shared.v2[ref].hdr.domid = domid;
296 wmb();
297 gnttab_shared.v2[ref].hdr.flags =
298 GTF_permit_access | GTF_sub_page | flags;
299}
300
301int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
302 unsigned long frame, int flags,
303 unsigned page_off,
304 unsigned length)
191{ 305{
192 u16 nflags; 306 if (flags & (GTF_accept_transfer | GTF_reading |
307 GTF_writing | GTF_transitive))
308 return -EPERM;
193 309
194 nflags = shared[ref].flags; 310 if (gnttab_interface->update_subpage_entry == NULL)
311 return -ENOSYS;
195 312
196 return nflags & (GTF_reading|GTF_writing); 313 gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
314 page_off, length);
315
316 return 0;
317}
318EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
319
320int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
321 int flags, unsigned page_off,
322 unsigned length)
323{
324 int ref, rc;
325
326 ref = get_free_entries(1);
327 if (unlikely(ref < 0))
328 return -ENOSPC;
329
330 rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
331 page_off, length);
332 if (rc < 0) {
333 put_free_entry(ref);
334 return rc;
335 }
336
337 return ref;
338}
339EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
340
341bool gnttab_subpage_grants_available(void)
342{
343 return gnttab_interface->update_subpage_entry != NULL;
344}
345EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
346
347void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
348 int flags, domid_t trans_domid,
349 grant_ref_t trans_gref)
350{
351 gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
352 gnttab_shared.v2[ref].transitive.gref = trans_gref;
353 gnttab_shared.v2[ref].hdr.domid = domid;
354 wmb();
355 gnttab_shared.v2[ref].hdr.flags =
356 GTF_permit_access | GTF_transitive | flags;
357}
358
359int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
360 int flags, domid_t trans_domid,
361 grant_ref_t trans_gref)
362{
363 if (flags & (GTF_accept_transfer | GTF_reading |
364 GTF_writing | GTF_sub_page))
365 return -EPERM;
366
367 if (gnttab_interface->update_trans_entry == NULL)
368 return -ENOSYS;
369
370 gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
371 trans_gref);
372
373 return 0;
374}
375EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
376
377int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
378 domid_t trans_domid,
379 grant_ref_t trans_gref)
380{
381 int ref, rc;
382
383 ref = get_free_entries(1);
384 if (unlikely(ref < 0))
385 return -ENOSPC;
386
387 rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
388 trans_domid, trans_gref);
389 if (rc < 0) {
390 put_free_entry(ref);
391 return rc;
392 }
393
394 return ref;
395}
396EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
397
398bool gnttab_trans_grants_available(void)
399{
400 return gnttab_interface->update_trans_entry != NULL;
401}
402EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
403
404static int gnttab_query_foreign_access_v1(grant_ref_t ref)
405{
406 return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
407}
408
409static int gnttab_query_foreign_access_v2(grant_ref_t ref)
410{
411 return grstatus[ref] & (GTF_reading|GTF_writing);
412}
413
414int gnttab_query_foreign_access(grant_ref_t ref)
415{
416 return gnttab_interface->query_foreign_access(ref);
197} 417}
198EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); 418EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
199 419
200int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 420static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
201{ 421{
202 u16 flags, nflags; 422 u16 flags, nflags;
423 u16 *pflags;
203 424
204 nflags = shared[ref].flags; 425 pflags = &gnttab_shared.v1[ref].flags;
426 nflags = *pflags;
205 do { 427 do {
206 flags = nflags; 428 flags = nflags;
207 if (flags & (GTF_reading|GTF_writing)) { 429 if (flags & (GTF_reading|GTF_writing)) {
208 printk(KERN_ALERT "WARNING: g.e. still in use!\n"); 430 printk(KERN_ALERT "WARNING: g.e. still in use!\n");
209 return 0; 431 return 0;
210 } 432 }
211 } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags); 433 } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
434
435 return 1;
436}
437
438static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
439{
440 gnttab_shared.v2[ref].hdr.flags = 0;
441 mb();
442 if (grstatus[ref] & (GTF_reading|GTF_writing)) {
443 return 0;
444 } else {
445 /* The read of grstatus needs to have acquire
446 semantics. On x86, reads already have
447 that, and we just need to protect against
448 compiler reorderings. On other
449 architectures we may need a full
450 barrier. */
451#ifdef CONFIG_X86
452 barrier();
453#else
454 mb();
455#endif
456 }
212 457
213 return 1; 458 return 1;
214} 459}
460
461int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
462{
463 return gnttab_interface->end_foreign_access_ref(ref, readonly);
464}
215EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); 465EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
216 466
217void gnttab_end_foreign_access(grant_ref_t ref, int readonly, 467void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
@@ -246,37 +496,76 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
246void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, 496void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
247 unsigned long pfn) 497 unsigned long pfn)
248{ 498{
249 update_grant_entry(ref, domid, pfn, GTF_accept_transfer); 499 gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
250} 500}
251EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); 501EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
252 502
253unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) 503static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
254{ 504{
255 unsigned long frame; 505 unsigned long frame;
256 u16 flags; 506 u16 flags;
507 u16 *pflags;
508
509 pflags = &gnttab_shared.v1[ref].flags;
257 510
258 /* 511 /*
259 * If a transfer is not even yet started, try to reclaim the grant 512 * If a transfer is not even yet started, try to reclaim the grant
260 * reference and return failure (== 0). 513 * reference and return failure (== 0).
261 */ 514 */
262 while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { 515 while (!((flags = *pflags) & GTF_transfer_committed)) {
263 if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags) 516 if (sync_cmpxchg(pflags, flags, 0) == flags)
264 return 0; 517 return 0;
265 cpu_relax(); 518 cpu_relax();
266 } 519 }
267 520
268 /* If a transfer is in progress then wait until it is completed. */ 521 /* If a transfer is in progress then wait until it is completed. */
269 while (!(flags & GTF_transfer_completed)) { 522 while (!(flags & GTF_transfer_completed)) {
270 flags = shared[ref].flags; 523 flags = *pflags;
271 cpu_relax(); 524 cpu_relax();
272 } 525 }
273 526
274 rmb(); /* Read the frame number /after/ reading completion status. */ 527 rmb(); /* Read the frame number /after/ reading completion status. */
275 frame = shared[ref].frame; 528 frame = gnttab_shared.v1[ref].frame;
529 BUG_ON(frame == 0);
530
531 return frame;
532}
533
534static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
535{
536 unsigned long frame;
537 u16 flags;
538 u16 *pflags;
539
540 pflags = &gnttab_shared.v2[ref].hdr.flags;
541
542 /*
543 * If a transfer is not even yet started, try to reclaim the grant
544 * reference and return failure (== 0).
545 */
546 while (!((flags = *pflags) & GTF_transfer_committed)) {
547 if (sync_cmpxchg(pflags, flags, 0) == flags)
548 return 0;
549 cpu_relax();
550 }
551
552 /* If a transfer is in progress then wait until it is completed. */
553 while (!(flags & GTF_transfer_completed)) {
554 flags = *pflags;
555 cpu_relax();
556 }
557
558 rmb(); /* Read the frame number /after/ reading completion status. */
559 frame = gnttab_shared.v2[ref].full_page.frame;
276 BUG_ON(frame == 0); 560 BUG_ON(frame == 0);
277 561
278 return frame; 562 return frame;
279} 563}
564
565unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
566{
567 return gnttab_interface->end_foreign_transfer_ref(ref);
568}
280EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); 569EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
281 570
282unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) 571unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
@@ -448,8 +737,8 @@ unsigned int gnttab_max_grant_frames(void)
448EXPORT_SYMBOL_GPL(gnttab_max_grant_frames); 737EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
449 738
450int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 739int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
451 struct gnttab_map_grant_ref *kmap_ops, 740 struct gnttab_map_grant_ref *kmap_ops,
452 struct page **pages, unsigned int count) 741 struct page **pages, unsigned int count)
453{ 742{
454 int i, ret; 743 int i, ret;
455 pte_t *pte; 744 pte_t *pte;
@@ -472,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
472 (map_ops[i].host_addr & ~PAGE_MASK)); 761 (map_ops[i].host_addr & ~PAGE_MASK));
473 mfn = pte_mfn(*pte); 762 mfn = pte_mfn(*pte);
474 } else { 763 } else {
475 /* If you really wanted to do this: 764 mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
476 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
477 *
478 * The reason we do not implement it is b/c on the
479 * unmap path (gnttab_unmap_refs) we have no means of
480 * checking whether the page is !GNTMAP_contains_pte.
481 *
482 * That is without some extra data-structure to carry
483 * the struct page, bool clear_pte, and list_head next
484 * tuples and deal with allocation/delallocation, etc.
485 *
486 * The users of this API set the GNTMAP_contains_pte
487 * flag so lets just return not supported until it
488 * becomes neccessary to implement.
489 */
490 return -EOPNOTSUPP;
491 } 765 }
492 ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]); 766 ret = m2p_add_override(mfn, pages[i], kmap_ops ?
767 &kmap_ops[i] : NULL);
493 if (ret) 768 if (ret)
494 return ret; 769 return ret;
495 } 770 }
@@ -499,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
499EXPORT_SYMBOL_GPL(gnttab_map_refs); 774EXPORT_SYMBOL_GPL(gnttab_map_refs);
500 775
501int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 776int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
502 struct page **pages, unsigned int count) 777 struct page **pages, unsigned int count, bool clear_pte)
503{ 778{
504 int i, ret; 779 int i, ret;
505 780
@@ -511,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
511 return ret; 786 return ret;
512 787
513 for (i = 0; i < count; i++) { 788 for (i = 0; i < count; i++) {
514 ret = m2p_remove_override(pages[i], true /* clear the PTE */); 789 ret = m2p_remove_override(pages[i], clear_pte);
515 if (ret) 790 if (ret)
516 return ret; 791 return ret;
517 } 792 }
@@ -520,6 +795,77 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
520} 795}
521EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 796EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
522 797
798static unsigned nr_status_frames(unsigned nr_grant_frames)
799{
800 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
801}
802
803static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
804{
805 int rc;
806
807 rc = arch_gnttab_map_shared(frames, nr_gframes,
808 gnttab_max_grant_frames(),
809 &gnttab_shared.addr);
810 BUG_ON(rc);
811
812 return 0;
813}
814
815static void gnttab_unmap_frames_v1(void)
816{
817 arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
818}
819
820static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
821{
822 uint64_t *sframes;
823 unsigned int nr_sframes;
824 struct gnttab_get_status_frames getframes;
825 int rc;
826
827 nr_sframes = nr_status_frames(nr_gframes);
828
 829	/* No need for kzalloc as it is initialized in the following hypercall
830 * GNTTABOP_get_status_frames.
831 */
832 sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
833 if (!sframes)
834 return -ENOMEM;
835
836 getframes.dom = DOMID_SELF;
837 getframes.nr_frames = nr_sframes;
838 set_xen_guest_handle(getframes.frame_list, sframes);
839
840 rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
841 &getframes, 1);
842 if (rc == -ENOSYS) {
843 kfree(sframes);
844 return -ENOSYS;
845 }
846
847 BUG_ON(rc || getframes.status);
848
849 rc = arch_gnttab_map_status(sframes, nr_sframes,
850 nr_status_frames(gnttab_max_grant_frames()),
851 &grstatus);
852 BUG_ON(rc);
853 kfree(sframes);
854
855 rc = arch_gnttab_map_shared(frames, nr_gframes,
856 gnttab_max_grant_frames(),
857 &gnttab_shared.addr);
858 BUG_ON(rc);
859
860 return 0;
861}
862
863static void gnttab_unmap_frames_v2(void)
864{
865 arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
866 arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
867}
868
523static int gnttab_map(unsigned int start_idx, unsigned int end_idx) 869static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
524{ 870{
525 struct gnttab_setup_table setup; 871 struct gnttab_setup_table setup;
@@ -551,6 +897,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
551 return rc; 897 return rc;
552 } 898 }
553 899
 900	/* No need for kzalloc as it is initialized in the following hypercall
901 * GNTTABOP_setup_table.
902 */
554 frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); 903 frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
555 if (!frames) 904 if (!frames)
556 return -ENOMEM; 905 return -ENOMEM;
@@ -567,19 +916,65 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
567 916
568 BUG_ON(rc || setup.status); 917 BUG_ON(rc || setup.status);
569 918
570 rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(), 919 rc = gnttab_interface->map_frames(frames, nr_gframes);
571 &shared);
572 BUG_ON(rc);
573 920
574 kfree(frames); 921 kfree(frames);
575 922
576 return 0; 923 return rc;
924}
925
926static struct gnttab_ops gnttab_v1_ops = {
927 .map_frames = gnttab_map_frames_v1,
928 .unmap_frames = gnttab_unmap_frames_v1,
929 .update_entry = gnttab_update_entry_v1,
930 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
931 .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
932 .query_foreign_access = gnttab_query_foreign_access_v1,
933};
934
935static struct gnttab_ops gnttab_v2_ops = {
936 .map_frames = gnttab_map_frames_v2,
937 .unmap_frames = gnttab_unmap_frames_v2,
938 .update_entry = gnttab_update_entry_v2,
939 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
940 .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
941 .query_foreign_access = gnttab_query_foreign_access_v2,
942 .update_subpage_entry = gnttab_update_subpage_entry_v2,
943 .update_trans_entry = gnttab_update_trans_entry_v2,
944};
945
946static void gnttab_request_version(void)
947{
948 int rc;
949 struct gnttab_set_version gsv;
950
951 gsv.version = 2;
952 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
953 if (rc == 0) {
954 grant_table_version = 2;
955 gnttab_interface = &gnttab_v2_ops;
956 } else if (grant_table_version == 2) {
957 /*
958 * If we've already used version 2 features,
959 * but then suddenly discover that they're not
960 * available (e.g. migrating to an older
961 * version of Xen), almost unbounded badness
962 * can happen.
963 */
964 panic("we need grant tables version 2, but only version 1 is available");
965 } else {
966 grant_table_version = 1;
967 gnttab_interface = &gnttab_v1_ops;
968 }
969 printk(KERN_INFO "Grant tables using version %d layout.\n",
970 grant_table_version);
577} 971}
578 972
579int gnttab_resume(void) 973int gnttab_resume(void)
580{ 974{
581 unsigned int max_nr_gframes; 975 unsigned int max_nr_gframes;
582 976
977 gnttab_request_version();
583 max_nr_gframes = gnttab_max_grant_frames(); 978 max_nr_gframes = gnttab_max_grant_frames();
584 if (max_nr_gframes < nr_grant_frames) 979 if (max_nr_gframes < nr_grant_frames)
585 return -ENOSYS; 980 return -ENOSYS;
@@ -587,9 +982,10 @@ int gnttab_resume(void)
587 if (xen_pv_domain()) 982 if (xen_pv_domain())
588 return gnttab_map(0, nr_grant_frames - 1); 983 return gnttab_map(0, nr_grant_frames - 1);
589 984
590 if (!shared) { 985 if (gnttab_shared.addr == NULL) {
591 shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes); 986 gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
592 if (shared == NULL) { 987 PAGE_SIZE * max_nr_gframes);
988 if (gnttab_shared.addr == NULL) {
593 printk(KERN_WARNING 989 printk(KERN_WARNING
594 "Failed to ioremap gnttab share frames!"); 990 "Failed to ioremap gnttab share frames!");
595 return -ENOMEM; 991 return -ENOMEM;
@@ -603,7 +999,7 @@ int gnttab_resume(void)
603 999
604int gnttab_suspend(void) 1000int gnttab_suspend(void)
605{ 1001{
606 arch_gnttab_unmap_shared(shared, nr_grant_frames); 1002 gnttab_interface->unmap_frames();
607 return 0; 1003 return 0;
608} 1004}
609 1005
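
The transitive-grant entry points added above only do anything once gnttab_request_version() has settled on the v2 layout, which is why callers are expected to probe gnttab_trans_grants_available() first. A minimal, hedged sketch of the intended calling pattern, using only functions whose signatures appear in this hunk (the wrapper name and its caller are illustrative, not part of the patch):

/* Hedged sketch: pass on read-only access to a frame this domain only
 * holds via grant 'trans_gref' in 'trans_domid', so that 'peer' can
 * reach it too.  Returns a grant reference or a negative errno. */
static int demo_forward_grant(domid_t peer, domid_t trans_domid,
			      grant_ref_t trans_gref)
{
	if (!gnttab_trans_grants_available())
		return -ENOSYS;		/* hypervisor only offers v1 tables */

	/* GTF_readonly is allowed here; GTF_reading/GTF_writing etc. are
	 * rejected by gnttab_grant_foreign_access_trans_ref() above. */
	return gnttab_grant_foreign_access_trans(peer, GTF_readonly,
						 trans_domid, trans_gref);
}

The reference is later torn down with gnttab_end_foreign_access(), exactly as for a v1 grant.
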
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/privcmd.c
index dbd3b16fd131..ccee0f16bcf8 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -18,6 +19,7 @@
18#include <linux/highmem.h> 19#include <linux/highmem.h>
19#include <linux/pagemap.h> 20#include <linux/pagemap.h>
20#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/miscdevice.h>
21 23
22#include <asm/pgalloc.h> 24#include <asm/pgalloc.h>
23#include <asm/pgtable.h> 25#include <asm/pgtable.h>
@@ -32,6 +34,10 @@
32#include <xen/page.h> 34#include <xen/page.h>
33#include <xen/xen-ops.h> 35#include <xen/xen-ops.h>
34 36
37#include "privcmd.h"
38
39MODULE_LICENSE("GPL");
40
35#ifndef HAVE_ARCH_PRIVCMD_MMAP 41#ifndef HAVE_ARCH_PRIVCMD_MMAP
36static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); 42static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
37#endif 43#endif
@@ -359,7 +365,6 @@ static long privcmd_ioctl(struct file *file,
359 return ret; 365 return ret;
360} 366}
361 367
362#ifndef HAVE_ARCH_PRIVCMD_MMAP
363static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 368static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
364{ 369{
365 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", 370 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -392,9 +397,39 @@ static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
392{ 397{
393 return (xchg(&vma->vm_private_data, (void *)1) == NULL); 398 return (xchg(&vma->vm_private_data, (void *)1) == NULL);
394} 399}
395#endif
396 400
397const struct file_operations privcmd_file_ops = { 401const struct file_operations xen_privcmd_fops = {
402 .owner = THIS_MODULE,
398 .unlocked_ioctl = privcmd_ioctl, 403 .unlocked_ioctl = privcmd_ioctl,
399 .mmap = privcmd_mmap, 404 .mmap = privcmd_mmap,
400}; 405};
406EXPORT_SYMBOL_GPL(xen_privcmd_fops);
407
408static struct miscdevice privcmd_dev = {
409 .minor = MISC_DYNAMIC_MINOR,
410 .name = "xen/privcmd",
411 .fops = &xen_privcmd_fops,
412};
413
414static int __init privcmd_init(void)
415{
416 int err;
417
418 if (!xen_domain())
419 return -ENODEV;
420
421 err = misc_register(&privcmd_dev);
422 if (err != 0) {
423 printk(KERN_ERR "Could not register Xen privcmd device\n");
424 return err;
425 }
426 return 0;
427}
428
429static void __exit privcmd_exit(void)
430{
431 misc_deregister(&privcmd_dev);
432}
433
434module_init(privcmd_init);
435module_exit(privcmd_exit);
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
new file mode 100644
index 000000000000..14facaeed36f
--- /dev/null
+++ b/drivers/xen/privcmd.h
@@ -0,0 +1,3 @@
1#include <linux/fs.h>
2
3extern const struct file_operations xen_privcmd_fops;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 8f06e1ed028c..7944a17f5cbf 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -99,6 +99,7 @@ static void pcistub_device_release(struct kref *kref)
99 kfree(pci_get_drvdata(psdev->dev)); 99 kfree(pci_get_drvdata(psdev->dev));
100 pci_set_drvdata(psdev->dev, NULL); 100 pci_set_drvdata(psdev->dev, NULL);
101 101
102 psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
102 pci_dev_put(psdev->dev); 103 pci_dev_put(psdev->dev);
103 104
104 kfree(psdev); 105 kfree(psdev);
@@ -234,6 +235,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
234 xen_pcibk_config_free_dyn_fields(found_psdev->dev); 235 xen_pcibk_config_free_dyn_fields(found_psdev->dev);
235 xen_pcibk_config_reset_dev(found_psdev->dev); 236 xen_pcibk_config_reset_dev(found_psdev->dev);
236 237
238 xen_unregister_device_domain_owner(found_psdev->dev);
239
237 spin_lock_irqsave(&found_psdev->lock, flags); 240 spin_lock_irqsave(&found_psdev->lock, flags);
238 found_psdev->pdev = NULL; 241 found_psdev->pdev = NULL;
239 spin_unlock_irqrestore(&found_psdev->lock, flags); 242 spin_unlock_irqrestore(&found_psdev->lock, flags);
@@ -331,6 +334,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
331 dev_dbg(&dev->dev, "reset device\n"); 334 dev_dbg(&dev->dev, "reset device\n");
332 xen_pcibk_reset_device(dev); 335 xen_pcibk_reset_device(dev);
333 336
337 dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
334 return 0; 338 return 0;
335 339
336config_release: 340config_release:
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 075525945e36..8e1c44d8ab46 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -241,11 +241,10 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
241 goto out; 241 goto out;
242 242
243 dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id); 243 dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
244 dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
245 if (xen_register_device_domain_owner(dev, 244 if (xen_register_device_domain_owner(dev,
246 pdev->xdev->otherend_id) != 0) { 245 pdev->xdev->otherend_id) != 0) {
247 dev_err(&dev->dev, "device has been assigned to another " \ 246 dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
248 "domain! Over-writting the ownership, but beware.\n"); 247 xen_find_device_domain_owner(dev));
249 xen_unregister_device_domain_owner(dev); 248 xen_unregister_device_domain_owner(dev);
250 xen_register_device_domain_owner(dev, pdev->xdev->otherend_id); 249 xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
251 } 250 }
@@ -281,7 +280,6 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
281 } 280 }
282 281
283 dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id); 282 dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
284 dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
285 xen_unregister_device_domain_owner(dev); 283 xen_unregister_device_domain_owner(dev);
286 284
287 xen_pcibk_release_pci_dev(pdev, dev); 285 xen_pcibk_release_pci_dev(pdev, dev);
@@ -707,19 +705,16 @@ static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
707 return 0; 705 return 0;
708} 706}
709 707
710static const struct xenbus_device_id xenpci_ids[] = { 708static const struct xenbus_device_id xen_pcibk_ids[] = {
711 {"pci"}, 709 {"pci"},
712 {""}, 710 {""},
713}; 711};
714 712
715static struct xenbus_driver xenbus_xen_pcibk_driver = { 713static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
716 .name = DRV_NAME,
717 .owner = THIS_MODULE,
718 .ids = xenpci_ids,
719 .probe = xen_pcibk_xenbus_probe, 714 .probe = xen_pcibk_xenbus_probe,
720 .remove = xen_pcibk_xenbus_remove, 715 .remove = xen_pcibk_xenbus_remove,
721 .otherend_changed = xen_pcibk_frontend_changed, 716 .otherend_changed = xen_pcibk_frontend_changed,
722}; 717);
723 718
724const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend; 719const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
725 720
@@ -735,11 +730,11 @@ int __init xen_pcibk_xenbus_register(void)
735 if (passthrough) 730 if (passthrough)
736 xen_pcibk_backend = &xen_pcibk_passthrough_backend; 731 xen_pcibk_backend = &xen_pcibk_passthrough_backend;
737 pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name); 732 pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
738 return xenbus_register_backend(&xenbus_xen_pcibk_driver); 733 return xenbus_register_backend(&xen_pcibk_driver);
739} 734}
740 735
741void __exit xen_pcibk_xenbus_unregister(void) 736void __exit xen_pcibk_xenbus_unregister(void)
742{ 737{
743 destroy_workqueue(xen_pcibk_wq); 738 destroy_workqueue(xen_pcibk_wq);
744 xenbus_unregister_driver(&xenbus_xen_pcibk_driver); 739 xenbus_unregister_driver(&xen_pcibk_driver);
745} 740}
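
The DEFINE_XENBUS_DRIVER() conversion seen here is repeated across every frontend and backend in this series; the macro lives in include/xen/xenbus.h (outside this excerpt) and, judging from the usage above, it supplies .name, .owner and .ids itself and names the resulting variable <prefix>_driver. A hedged sketch of a converted frontend, where the "demo" device type and callbacks are hypothetical and only the registration pattern comes from this patch:

#include <linux/module.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

static const struct xenbus_device_id demo_ids[] = {
	{ "demo" },
	{ "" },
};

/* Trivial callbacks, only to keep the sketch self-contained. */
static int demo_probe(struct xenbus_device *dev,
		      const struct xenbus_device_id *id)
{
	return 0;
}

static int demo_remove(struct xenbus_device *dev)
{
	return 0;
}

static void demo_otherend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
}

static DEFINE_XENBUS_DRIVER(demo, "demo",
	.probe = demo_probe,
	.remove = demo_remove,
	.otherend_changed = demo_otherend_changed,
);

static int __init demo_init(void)
{
	if (!xen_domain())
		return -ENODEV;
	/* the macro above named its variable demo_driver */
	return xenbus_register_frontend(&demo_driver);
}
module_init(demo_init);

MODULE_LICENSE("GPL");
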
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 8dca685358b4..31e2e9050c7a 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -1,4 +1,5 @@
1obj-y += xenbus.o 1obj-y += xenbus.o
2obj-y += xenbus_dev_frontend.o
2 3
3xenbus-objs = 4xenbus-objs =
4xenbus-objs += xenbus_client.o 5xenbus-objs += xenbus_client.o
@@ -9,4 +10,5 @@ xenbus-objs += xenbus_probe.o
9xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o 10xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
10xenbus-objs += $(xenbus-be-objs-y) 11xenbus-objs += $(xenbus-be-objs-y)
11 12
13obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o
12obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o 14obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 1906125eab49..566d2adbd6ea 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -32,15 +32,39 @@
32 32
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/spinlock.h>
35#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
36#include <linux/export.h> 37#include <linux/export.h>
37#include <asm/xen/hypervisor.h> 38#include <asm/xen/hypervisor.h>
38#include <asm/xen/page.h> 39#include <asm/xen/page.h>
39#include <xen/interface/xen.h> 40#include <xen/interface/xen.h>
40#include <xen/interface/event_channel.h> 41#include <xen/interface/event_channel.h>
42#include <xen/balloon.h>
41#include <xen/events.h> 43#include <xen/events.h>
42#include <xen/grant_table.h> 44#include <xen/grant_table.h>
43#include <xen/xenbus.h> 45#include <xen/xenbus.h>
46#include <xen/xen.h>
47
48#include "xenbus_probe.h"
49
50struct xenbus_map_node {
51 struct list_head next;
52 union {
53 struct vm_struct *area; /* PV */
54 struct page *page; /* HVM */
55 };
56 grant_handle_t handle;
57};
58
59static DEFINE_SPINLOCK(xenbus_valloc_lock);
60static LIST_HEAD(xenbus_valloc_pages);
61
62struct xenbus_ring_ops {
63 int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
64 int (*unmap)(struct xenbus_device *dev, void *vaddr);
65};
66
67static const struct xenbus_ring_ops *ring_ops __read_mostly;
44 68
45const char *xenbus_strstate(enum xenbus_state state) 69const char *xenbus_strstate(enum xenbus_state state)
46{ 70{
@@ -436,19 +460,33 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
436 */ 460 */
437int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) 461int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
438{ 462{
463 return ring_ops->map(dev, gnt_ref, vaddr);
464}
465EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
466
467static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
468 int gnt_ref, void **vaddr)
469{
439 struct gnttab_map_grant_ref op = { 470 struct gnttab_map_grant_ref op = {
440 .flags = GNTMAP_host_map | GNTMAP_contains_pte, 471 .flags = GNTMAP_host_map | GNTMAP_contains_pte,
441 .ref = gnt_ref, 472 .ref = gnt_ref,
442 .dom = dev->otherend_id, 473 .dom = dev->otherend_id,
443 }; 474 };
475 struct xenbus_map_node *node;
444 struct vm_struct *area; 476 struct vm_struct *area;
445 pte_t *pte; 477 pte_t *pte;
446 478
447 *vaddr = NULL; 479 *vaddr = NULL;
448 480
481 node = kzalloc(sizeof(*node), GFP_KERNEL);
482 if (!node)
483 return -ENOMEM;
484
449 area = alloc_vm_area(PAGE_SIZE, &pte); 485 area = alloc_vm_area(PAGE_SIZE, &pte);
450 if (!area) 486 if (!area) {
487 kfree(node);
451 return -ENOMEM; 488 return -ENOMEM;
489 }
452 490
453 op.host_addr = arbitrary_virt_to_machine(pte).maddr; 491 op.host_addr = arbitrary_virt_to_machine(pte).maddr;
454 492
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
457 495
458 if (op.status != GNTST_okay) { 496 if (op.status != GNTST_okay) {
459 free_vm_area(area); 497 free_vm_area(area);
498 kfree(node);
460 xenbus_dev_fatal(dev, op.status, 499 xenbus_dev_fatal(dev, op.status,
461 "mapping in shared page %d from domain %d", 500 "mapping in shared page %d from domain %d",
462 gnt_ref, dev->otherend_id); 501 gnt_ref, dev->otherend_id);
463 return op.status; 502 return op.status;
464 } 503 }
465 504
466 /* Stuff the handle in an unused field */ 505 node->handle = op.handle;
467 area->phys_addr = (unsigned long)op.handle; 506 node->area = area;
507
508 spin_lock(&xenbus_valloc_lock);
509 list_add(&node->next, &xenbus_valloc_pages);
510 spin_unlock(&xenbus_valloc_lock);
468 511
469 *vaddr = area->addr; 512 *vaddr = area->addr;
470 return 0; 513 return 0;
471} 514}
472EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); 515
516static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
517 int gnt_ref, void **vaddr)
518{
519 struct xenbus_map_node *node;
520 int err;
521 void *addr;
522
523 *vaddr = NULL;
524
525 node = kzalloc(sizeof(*node), GFP_KERNEL);
526 if (!node)
527 return -ENOMEM;
528
529 err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
530 if (err)
531 goto out_err;
532
533 addr = pfn_to_kaddr(page_to_pfn(node->page));
534
535 err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
536 if (err)
537 goto out_err;
538
539 spin_lock(&xenbus_valloc_lock);
540 list_add(&node->next, &xenbus_valloc_pages);
541 spin_unlock(&xenbus_valloc_lock);
542
543 *vaddr = addr;
544 return 0;
545
546 out_err:
547 free_xenballooned_pages(1, &node->page);
548 kfree(node);
549 return err;
550}
473 551
474 552
475/** 553/**
@@ -489,12 +567,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
489int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, 567int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
490 grant_handle_t *handle, void *vaddr) 568 grant_handle_t *handle, void *vaddr)
491{ 569{
492 struct gnttab_map_grant_ref op = { 570 struct gnttab_map_grant_ref op;
493 .host_addr = (unsigned long)vaddr, 571
494 .flags = GNTMAP_host_map, 572 gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
495 .ref = gnt_ref, 573 dev->otherend_id);
496 .dom = dev->otherend_id,
497 };
498 574
499 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) 575 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
500 BUG(); 576 BUG();
@@ -525,32 +601,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
525 */ 601 */
526int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) 602int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
527{ 603{
528 struct vm_struct *area; 604 return ring_ops->unmap(dev, vaddr);
605}
606EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
607
608static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
609{
610 struct xenbus_map_node *node;
529 struct gnttab_unmap_grant_ref op = { 611 struct gnttab_unmap_grant_ref op = {
530 .host_addr = (unsigned long)vaddr, 612 .host_addr = (unsigned long)vaddr,
531 }; 613 };
532 unsigned int level; 614 unsigned int level;
533 615
534 /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) 616 spin_lock(&xenbus_valloc_lock);
535 * method so that we don't have to muck with vmalloc internals here. 617 list_for_each_entry(node, &xenbus_valloc_pages, next) {
536 * We could force the user to hang on to their struct vm_struct from 618 if (node->area->addr == vaddr) {
537 * xenbus_map_ring_valloc, but these 6 lines considerably simplify 619 list_del(&node->next);
538 * this API. 620 goto found;
539 */ 621 }
540 read_lock(&vmlist_lock);
541 for (area = vmlist; area != NULL; area = area->next) {
542 if (area->addr == vaddr)
543 break;
544 } 622 }
545 read_unlock(&vmlist_lock); 623 node = NULL;
624 found:
625 spin_unlock(&xenbus_valloc_lock);
546 626
547 if (!area) { 627 if (!node) {
548 xenbus_dev_error(dev, -ENOENT, 628 xenbus_dev_error(dev, -ENOENT,
549 "can't find mapped virtual address %p", vaddr); 629 "can't find mapped virtual address %p", vaddr);
550 return GNTST_bad_virt_addr; 630 return GNTST_bad_virt_addr;
551 } 631 }
552 632
553 op.handle = (grant_handle_t)area->phys_addr; 633 op.handle = node->handle;
554 op.host_addr = arbitrary_virt_to_machine( 634 op.host_addr = arbitrary_virt_to_machine(
555 lookup_address((unsigned long)vaddr, &level)).maddr; 635 lookup_address((unsigned long)vaddr, &level)).maddr;
556 636
@@ -558,16 +638,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
558 BUG(); 638 BUG();
559 639
560 if (op.status == GNTST_okay) 640 if (op.status == GNTST_okay)
561 free_vm_area(area); 641 free_vm_area(node->area);
562 else 642 else
563 xenbus_dev_error(dev, op.status, 643 xenbus_dev_error(dev, op.status,
564 "unmapping page at handle %d error %d", 644 "unmapping page at handle %d error %d",
565 (int16_t)area->phys_addr, op.status); 645 node->handle, op.status);
566 646
647 kfree(node);
567 return op.status; 648 return op.status;
568} 649}
569EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
570 650
651static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
652{
653 int rv;
654 struct xenbus_map_node *node;
655 void *addr;
656
657 spin_lock(&xenbus_valloc_lock);
658 list_for_each_entry(node, &xenbus_valloc_pages, next) {
659 addr = pfn_to_kaddr(page_to_pfn(node->page));
660 if (addr == vaddr) {
661 list_del(&node->next);
662 goto found;
663 }
664 }
665 node = NULL;
666 found:
667 spin_unlock(&xenbus_valloc_lock);
668
669 if (!node) {
670 xenbus_dev_error(dev, -ENOENT,
671 "can't find mapped virtual address %p", vaddr);
672 return GNTST_bad_virt_addr;
673 }
674
675 rv = xenbus_unmap_ring(dev, node->handle, addr);
676
677 if (!rv)
678 free_xenballooned_pages(1, &node->page);
679 else
680 WARN(1, "Leaking %p\n", vaddr);
681
682 kfree(node);
683 return rv;
684}
571 685
572/** 686/**
573 * xenbus_unmap_ring 687 * xenbus_unmap_ring
@@ -582,10 +696,9 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
582int xenbus_unmap_ring(struct xenbus_device *dev, 696int xenbus_unmap_ring(struct xenbus_device *dev,
583 grant_handle_t handle, void *vaddr) 697 grant_handle_t handle, void *vaddr)
584{ 698{
585 struct gnttab_unmap_grant_ref op = { 699 struct gnttab_unmap_grant_ref op;
586 .host_addr = (unsigned long)vaddr, 700
587 .handle = handle, 701 gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
588 };
589 702
590 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) 703 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
591 BUG(); 704 BUG();
@@ -617,3 +730,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
617 return result; 730 return result;
618} 731}
619EXPORT_SYMBOL_GPL(xenbus_read_driver_state); 732EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
733
734static const struct xenbus_ring_ops ring_ops_pv = {
735 .map = xenbus_map_ring_valloc_pv,
736 .unmap = xenbus_unmap_ring_vfree_pv,
737};
738
739static const struct xenbus_ring_ops ring_ops_hvm = {
740 .map = xenbus_map_ring_valloc_hvm,
741 .unmap = xenbus_unmap_ring_vfree_hvm,
742};
743
744void __init xenbus_ring_ops_init(void)
745{
746 if (xen_pv_domain())
747 ring_ops = &ring_ops_pv;
748 else
749 ring_ops = &ring_ops_hvm;
750}
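
With the ring_ops split above, xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree() remain the only entry points a backend needs; whether the PV path (alloc_vm_area plus GNTMAP_contains_pte) or the HVM path (a ballooned page plus xenbus_map_ring) runs underneath is chosen once by xenbus_ring_ops_init(). A hedged sketch of the usual backend-side caller, with hypothetical names and no real ring protocol:

/* Map the one-page ring the frontend granted via 'gnt_ref', use it,
 * and unmap it again.  Error handling is deliberately minimal. */
static int demo_use_ring(struct xenbus_device *dev, int gnt_ref)
{
	void *ring;
	int err;

	err = xenbus_map_ring_valloc(dev, gnt_ref, &ring);
	if (err)
		return err;

	/* ... set up shared ring state at 'ring' here ... */

	return xenbus_unmap_ring_vfree(dev, ring);
}
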
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index c21db7513736..6e42800fa499 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -31,6 +31,8 @@
31#ifndef _XENBUS_COMMS_H 31#ifndef _XENBUS_COMMS_H
32#define _XENBUS_COMMS_H 32#define _XENBUS_COMMS_H
33 33
34#include <linux/fs.h>
35
34int xs_init(void); 36int xs_init(void);
35int xb_init_comms(void); 37int xb_init_comms(void);
36 38
@@ -43,4 +45,6 @@ int xs_input_avail(void);
43extern struct xenstore_domain_interface *xen_store_interface; 45extern struct xenstore_domain_interface *xen_store_interface;
44extern int xen_store_evtchn; 46extern int xen_store_evtchn;
45 47
48extern const struct file_operations xen_xenbus_fops;
49
46#endif /* _XENBUS_COMMS_H */ 50#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
new file mode 100644
index 000000000000..3d3be78c1093
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -0,0 +1,90 @@
1#include <linux/slab.h>
2#include <linux/types.h>
3#include <linux/mm.h>
4#include <linux/fs.h>
5#include <linux/miscdevice.h>
6#include <linux/module.h>
7#include <linux/capability.h>
8
9#include <xen/xen.h>
10#include <xen/page.h>
11#include <xen/xenbus_dev.h>
12
13#include "xenbus_comms.h"
14
15MODULE_LICENSE("GPL");
16
17static int xenbus_backend_open(struct inode *inode, struct file *filp)
18{
19 if (!capable(CAP_SYS_ADMIN))
20 return -EPERM;
21
22 return nonseekable_open(inode, filp);
23}
24
25static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
26{
27 if (!capable(CAP_SYS_ADMIN))
28 return -EPERM;
29
30 switch (cmd) {
31 case IOCTL_XENBUS_BACKEND_EVTCHN:
32 if (xen_store_evtchn > 0)
33 return xen_store_evtchn;
34 return -ENODEV;
35
36 default:
37 return -ENOTTY;
38 }
39}
40
41static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
42{
43 size_t size = vma->vm_end - vma->vm_start;
44
45 if (!capable(CAP_SYS_ADMIN))
46 return -EPERM;
47
48 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
49 return -EINVAL;
50
51 if (remap_pfn_range(vma, vma->vm_start,
52 virt_to_pfn(xen_store_interface),
53 size, vma->vm_page_prot))
54 return -EAGAIN;
55
56 return 0;
57}
58
59const struct file_operations xenbus_backend_fops = {
60 .open = xenbus_backend_open,
61 .mmap = xenbus_backend_mmap,
62 .unlocked_ioctl = xenbus_backend_ioctl,
63};
64
65static struct miscdevice xenbus_backend_dev = {
66 .minor = MISC_DYNAMIC_MINOR,
67 .name = "xen/xenbus_backend",
68 .fops = &xenbus_backend_fops,
69};
70
71static int __init xenbus_backend_init(void)
72{
73 int err;
74
75 if (!xen_initial_domain())
76 return -ENODEV;
77
78 err = misc_register(&xenbus_backend_dev);
79 if (err)
80 printk(KERN_ERR "Could not register xenbus backend device\n");
81 return err;
82}
83
84static void __exit xenbus_backend_exit(void)
85{
86 misc_deregister(&xenbus_backend_dev);
87}
88
89module_init(xenbus_backend_init);
90module_exit(xenbus_backend_exit);
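
The intended consumer of this new backend device is a userspace xenstore daemon running in a driver domain. The sketch below is an assumption about how such a daemon would use it, not code from this series: it assumes devtmpfs/udev exposes the misc device as /dev/xen/xenbus_backend and that IOCTL_XENBUS_BACKEND_EVTCHN is reachable from an exported copy of xen/xenbus_dev.h.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/xenbus_dev.h>	/* IOCTL_XENBUS_BACKEND_EVTCHN (assumed path) */

int main(void)
{
	int fd, evtchn;
	void *intf;

	fd = open("/dev/xen/xenbus_backend", O_RDWR);
	if (fd < 0)
		return 1;

	/* Event channel the kernel bound for the xenstore ring. */
	evtchn = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
	if (evtchn < 0)
		return 1;

	/* Single page holding struct xenstore_domain_interface. */
	intf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (intf == MAP_FAILED)
		return 1;

	printf("xenstore event channel %d, ring mapped at %p\n", evtchn, intf);
	return 0;
}
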
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index bbd000f88af7..527dc2a3b89f 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -52,13 +52,17 @@
52#include <linux/namei.h> 52#include <linux/namei.h>
53#include <linux/string.h> 53#include <linux/string.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/miscdevice.h>
56#include <linux/module.h>
55 57
56#include "xenfs.h" 58#include "xenbus_comms.h"
57#include "../xenbus/xenbus_comms.h"
58 59
59#include <xen/xenbus.h> 60#include <xen/xenbus.h>
61#include <xen/xen.h>
60#include <asm/xen/hypervisor.h> 62#include <asm/xen/hypervisor.h>
61 63
64MODULE_LICENSE("GPL");
65
62/* 66/*
63 * An element of a list of outstanding transactions, for which we're 67 * An element of a list of outstanding transactions, for which we're
64 * still waiting a reply. 68 * still waiting a reply.
@@ -101,7 +105,7 @@ struct xenbus_file_priv {
101 unsigned int len; 105 unsigned int len;
102 union { 106 union {
103 struct xsd_sockmsg msg; 107 struct xsd_sockmsg msg;
104 char buffer[PAGE_SIZE]; 108 char buffer[XENSTORE_PAYLOAD_MAX];
105 } u; 109 } u;
106 110
107 /* Response queue. */ 111 /* Response queue. */
@@ -583,7 +587,7 @@ static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
583 return 0; 587 return 0;
584} 588}
585 589
586const struct file_operations xenbus_file_ops = { 590const struct file_operations xen_xenbus_fops = {
587 .read = xenbus_file_read, 591 .read = xenbus_file_read,
588 .write = xenbus_file_write, 592 .write = xenbus_file_write,
589 .open = xenbus_file_open, 593 .open = xenbus_file_open,
@@ -591,3 +595,31 @@ const struct file_operations xenbus_file_ops = {
591 .poll = xenbus_file_poll, 595 .poll = xenbus_file_poll,
592 .llseek = no_llseek, 596 .llseek = no_llseek,
593}; 597};
598EXPORT_SYMBOL_GPL(xen_xenbus_fops);
599
600static struct miscdevice xenbus_dev = {
601 .minor = MISC_DYNAMIC_MINOR,
602 .name = "xen/xenbus",
603 .fops = &xen_xenbus_fops,
604};
605
606static int __init xenbus_init(void)
607{
608 int err;
609
610 if (!xen_domain())
611 return -ENODEV;
612
613 err = misc_register(&xenbus_dev);
614 if (err)
615 printk(KERN_ERR "Could not register xenbus frontend device\n");
616 return err;
617}
618
619static void __exit xenbus_exit(void)
620{
621 misc_deregister(&xenbus_dev);
622}
623
624module_init(xenbus_init);
625module_exit(xenbus_exit);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 1b178c6e8937..3864967202b5 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -291,14 +291,9 @@ void xenbus_dev_shutdown(struct device *_dev)
291EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); 291EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
292 292
293int xenbus_register_driver_common(struct xenbus_driver *drv, 293int xenbus_register_driver_common(struct xenbus_driver *drv,
294 struct xen_bus_type *bus, 294 struct xen_bus_type *bus)
295 struct module *owner,
296 const char *mod_name)
297{ 295{
298 drv->driver.name = drv->name;
299 drv->driver.bus = &bus->bus; 296 drv->driver.bus = &bus->bus;
300 drv->driver.owner = owner;
301 drv->driver.mod_name = mod_name;
302 297
303 return driver_register(&drv->driver); 298 return driver_register(&drv->driver);
304} 299}
@@ -730,6 +725,8 @@ static int __init xenbus_init(void)
730 if (!xen_domain()) 725 if (!xen_domain())
731 return -ENODEV; 726 return -ENODEV;
732 727
728 xenbus_ring_ops_init();
729
733 if (xen_hvm_domain()) { 730 if (xen_hvm_domain()) {
734 uint64_t v = 0; 731 uint64_t v = 0;
735 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); 732 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 9b1de4e34c64..bb4f92ed8730 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -53,9 +53,7 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
53extern int xenbus_dev_probe(struct device *_dev); 53extern int xenbus_dev_probe(struct device *_dev);
54extern int xenbus_dev_remove(struct device *_dev); 54extern int xenbus_dev_remove(struct device *_dev);
55extern int xenbus_register_driver_common(struct xenbus_driver *drv, 55extern int xenbus_register_driver_common(struct xenbus_driver *drv,
56 struct xen_bus_type *bus, 56 struct xen_bus_type *bus);
57 struct module *owner,
58 const char *mod_name);
59extern int xenbus_probe_node(struct xen_bus_type *bus, 57extern int xenbus_probe_node(struct xen_bus_type *bus,
60 const char *type, 58 const char *type,
61 const char *nodename); 59 const char *nodename);
@@ -76,4 +74,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch,
76extern int xenbus_read_otherend_details(struct xenbus_device *xendev, 74extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
77 char *id_node, char *path_node); 75 char *id_node, char *path_node);
78 76
77void xenbus_ring_ops_init(void);
78
79#endif 79#endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index c3c7cd195c11..257be37d9091 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -232,15 +232,13 @@ int xenbus_dev_is_online(struct xenbus_device *dev)
232} 232}
233EXPORT_SYMBOL_GPL(xenbus_dev_is_online); 233EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
234 234
235int __xenbus_register_backend(struct xenbus_driver *drv, 235int xenbus_register_backend(struct xenbus_driver *drv)
236 struct module *owner, const char *mod_name)
237{ 236{
238 drv->read_otherend_details = read_frontend_details; 237 drv->read_otherend_details = read_frontend_details;
239 238
240 return xenbus_register_driver_common(drv, &xenbus_backend, 239 return xenbus_register_driver_common(drv, &xenbus_backend);
241 owner, mod_name);
242} 240}
243EXPORT_SYMBOL_GPL(__xenbus_register_backend); 241EXPORT_SYMBOL_GPL(xenbus_register_backend);
244 242
245static int backend_probe_and_watch(struct notifier_block *notifier, 243static int backend_probe_and_watch(struct notifier_block *notifier,
246 unsigned long event, 244 unsigned long event,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 2f73195512b4..9c57819df51a 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -230,15 +230,13 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
230 print_device_status); 230 print_device_status);
231} 231}
232 232
233int __xenbus_register_frontend(struct xenbus_driver *drv, 233int xenbus_register_frontend(struct xenbus_driver *drv)
234 struct module *owner, const char *mod_name)
235{ 234{
236 int ret; 235 int ret;
237 236
238 drv->read_otherend_details = read_backend_details; 237 drv->read_otherend_details = read_backend_details;
239 238
240 ret = xenbus_register_driver_common(drv, &xenbus_frontend, 239 ret = xenbus_register_driver_common(drv, &xenbus_frontend);
241 owner, mod_name);
242 if (ret) 240 if (ret)
243 return ret; 241 return ret;
244 242
@@ -247,7 +245,7 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
247 245
248 return 0; 246 return 0;
249} 247}
250EXPORT_SYMBOL_GPL(__xenbus_register_frontend); 248EXPORT_SYMBOL_GPL(xenbus_register_frontend);
251 249
252static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); 250static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
253static int backend_state; 251static int backend_state;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ede860f921df..d1c217b23a42 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -532,21 +532,18 @@ int xenbus_printf(struct xenbus_transaction t,
532{ 532{
533 va_list ap; 533 va_list ap;
534 int ret; 534 int ret;
535#define PRINTF_BUFFER_SIZE 4096 535 char *buf;
536 char *printf_buffer;
537
538 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
539 if (printf_buffer == NULL)
540 return -ENOMEM;
541 536
542 va_start(ap, fmt); 537 va_start(ap, fmt);
543 ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); 538 buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
544 va_end(ap); 539 va_end(ap);
545 540
546 BUG_ON(ret > PRINTF_BUFFER_SIZE-1); 541 if (!buf)
547 ret = xenbus_write(t, dir, node, printf_buffer); 542 return -ENOMEM;
543
544 ret = xenbus_write(t, dir, node, buf);
548 545
549 kfree(printf_buffer); 546 kfree(buf);
550 547
551 return ret; 548 return ret;
552} 549}
@@ -801,6 +798,12 @@ static int process_msg(void)
801 goto out; 798 goto out;
802 } 799 }
803 800
801 if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
802 kfree(msg);
803 err = -EINVAL;
804 goto out;
805 }
806
804 body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); 807 body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
805 if (body == NULL) { 808 if (body == NULL) {
806 kfree(msg); 809 kfree(msg);
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index 4fde9440fe1f..b019865fcc56 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_XENFS) += xenfs.o 1obj-$(CONFIG_XENFS) += xenfs.o
2 2
3xenfs-y = super.o xenbus.o privcmd.o 3xenfs-y = super.o
4xenfs-$(CONFIG_XEN_DOM0) += xenstored.o 4xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 1aa389719846..a84b53c01436 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -16,6 +16,8 @@
16#include <xen/xen.h> 16#include <xen/xen.h>
17 17
18#include "xenfs.h" 18#include "xenfs.h"
19#include "../privcmd.h"
20#include "../xenbus/xenbus_comms.h"
19 21
20#include <asm/xen/hypervisor.h> 22#include <asm/xen/hypervisor.h>
21 23
@@ -82,9 +84,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
82{ 84{
83 static struct tree_descr xenfs_files[] = { 85 static struct tree_descr xenfs_files[] = {
84 [1] = {}, 86 [1] = {},
85 { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR }, 87 { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
86 { "capabilities", &capabilities_file_ops, S_IRUGO }, 88 { "capabilities", &capabilities_file_ops, S_IRUGO },
87 { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR }, 89 { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
88 {""}, 90 {""},
89 }; 91 };
90 int rc; 92 int rc;
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index b68aa6200003..6b80c7779c02 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -1,8 +1,6 @@
1#ifndef _XENFS_XENBUS_H 1#ifndef _XENFS_XENBUS_H
2#define _XENFS_XENBUS_H 2#define _XENFS_XENBUS_H
3 3
4extern const struct file_operations xenbus_file_ops;
5extern const struct file_operations privcmd_file_ops;
6extern const struct file_operations xsd_kva_file_ops; 4extern const struct file_operations xsd_kva_file_ops;
7extern const struct file_operations xsd_port_file_ops; 5extern const struct file_operations xsd_port_file_ops;
8 6
diff --git a/include/xen/events.h b/include/xen/events.h
index d287997d3eab..0f773708e02c 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -37,6 +37,13 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
37 */ 37 */
38void unbind_from_irqhandler(unsigned int irq, void *dev_id); 38void unbind_from_irqhandler(unsigned int irq, void *dev_id);
39 39
40/*
41 * Allow extra references to event channels exposed to userspace by evtchn
42 */
43int evtchn_make_refcounted(unsigned int evtchn);
44int evtchn_get(unsigned int evtchn);
45void evtchn_put(unsigned int evtchn);
46
40void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); 47void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
41int resend_irq_on_evtchn(unsigned int irq); 48int resend_irq_on_evtchn(unsigned int irq);
42void rebind_evtchn_irq(int evtchn, int irq); 49void rebind_evtchn_irq(int evtchn, int irq);
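
The three new declarations let code that hands event channels to userspace (the evtchn device) keep a channel alive across a deferred operation. A hedged sketch of the intended pattern, assuming the usual 0-on-success convention for evtchn_get() (the implementation is in drivers/xen/events.c, outside this excerpt):

/* 'port' is an event channel previously marked refcounted with
 * evtchn_make_refcounted(); illustrative only. */
static int demo_use_port(unsigned int port)
{
	if (evtchn_get(port))
		return -ENOENT;		/* channel is already being torn down */

	/* ... the channel cannot be freed while we hold the reference ... */

	evtchn_put(port);
	return 0;
}
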
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 11e2dfce42f8..15f8a00ff003 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -62,6 +62,24 @@ int gnttab_resume(void);
62 62
63int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, 63int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
64 int readonly); 64 int readonly);
65int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
66 int flags, unsigned page_off,
67 unsigned length);
68int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
69 domid_t trans_domid,
70 grant_ref_t trans_gref);
71
72/*
73 * Are sub-page grants available on this version of Xen? Returns true if they
74 * are, and false if they're not.
75 */
76bool gnttab_subpage_grants_available(void);
77
78/*
79 * Are transitive grants available on this version of Xen? Returns true if they
80 * are, and false if they're not.
81 */
82bool gnttab_trans_grants_available(void);
65 83
66/* 84/*
67 * End access through the given grant reference, iff the grant entry is no 85 * End access through the given grant reference, iff the grant entry is no
@@ -108,6 +126,13 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
108 126
109void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, 127void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
110 unsigned long frame, int readonly); 128 unsigned long frame, int readonly);
129int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
130 unsigned long frame, int flags,
131 unsigned page_off,
132 unsigned length);
133int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
134 int flags, domid_t trans_domid,
135 grant_ref_t trans_gref);
111 136
112void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, 137void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
113 unsigned long pfn); 138 unsigned long pfn);
@@ -145,9 +170,11 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
145 170
146int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 171int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
147 unsigned long max_nr_gframes, 172 unsigned long max_nr_gframes,
148 struct grant_entry **__shared); 173 void **__shared);
149void arch_gnttab_unmap_shared(struct grant_entry *shared, 174int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
150 unsigned long nr_gframes); 175 unsigned long max_nr_gframes,
176 grant_status_t **__shared);
177void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);
151 178
152extern unsigned long xen_hvm_resume_frames; 179extern unsigned long xen_hvm_resume_frames;
153unsigned int gnttab_max_grant_frames(void); 180unsigned int gnttab_max_grant_frames(void);
@@ -155,9 +182,9 @@ unsigned int gnttab_max_grant_frames(void);
155#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) 182#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
156 183
157int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 184int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
158 struct gnttab_map_grant_ref *kmap_ops, 185 struct gnttab_map_grant_ref *kmap_ops,
159 struct page **pages, unsigned int count); 186 struct page **pages, unsigned int count);
160int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 187int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
161 struct page **pages, unsigned int count); 188 struct page **pages, unsigned int count, bool clear_pte);
162 189
163#endif /* __ASM_GNTTAB_H__ */ 190#endif /* __ASM_GNTTAB_H__ */
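
The header now mirrors the grant-table.c additions; like transitive grants, sub-page grants are a v2-only feature, so the availability helper is meant to gate their use. A short hedged sketch with illustrative names:

/* Let 'peer' copy 'len' bytes starting at byte 'offset' of 'frame',
 * but only if the hypervisor gave us a v2 grant table. */
static int demo_grant_subrange(domid_t peer, unsigned long frame,
			       unsigned offset, unsigned len)
{
	if (!gnttab_subpage_grants_available())
		return -ENOSYS;

	/* by analogy with the transitive helper, this returns a grant
	 * reference on success or a negative errno on failure */
	return gnttab_grant_foreign_access_subpage(peer, frame, 0,
						   offset, len);
}
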
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index 39e571796e32..a17d84433e6a 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -85,12 +85,22 @@
85 */ 85 */
86 86
87/* 87/*
88 * Reference to a grant entry in a specified domain's grant table.
89 */
90typedef uint32_t grant_ref_t;
91
92/*
88 * A grant table comprises a packed array of grant entries in one or more 93 * A grant table comprises a packed array of grant entries in one or more
89 * page frames shared between Xen and a guest. 94 * page frames shared between Xen and a guest.
90 * [XEN]: This field is written by Xen and read by the sharing guest. 95 * [XEN]: This field is written by Xen and read by the sharing guest.
91 * [GST]: This field is written by the guest and read by Xen. 96 * [GST]: This field is written by the guest and read by Xen.
92 */ 97 */
93struct grant_entry { 98
99/*
100 * Version 1 of the grant table entry structure is maintained purely
101 * for backwards compatibility. New guests should use version 2.
102 */
103struct grant_entry_v1 {
94 /* GTF_xxx: various type and flag information. [XEN,GST] */ 104 /* GTF_xxx: various type and flag information. [XEN,GST] */
95 uint16_t flags; 105 uint16_t flags;
96 /* The domain being granted foreign privileges. [GST] */ 106 /* The domain being granted foreign privileges. [GST] */
@@ -108,10 +118,13 @@ struct grant_entry {
108 * GTF_permit_access: Allow @domid to map/access @frame. 118 * GTF_permit_access: Allow @domid to map/access @frame.
109 * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame 119 * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
110 * to this guest. Xen writes the page number to @frame. 120 * to this guest. Xen writes the page number to @frame.
121 * GTF_transitive: Allow @domid to transitively access a subrange of
122 * @trans_grant in @trans_domid. No mappings are allowed.
111 */ 123 */
112#define GTF_invalid (0U<<0) 124#define GTF_invalid (0U<<0)
113#define GTF_permit_access (1U<<0) 125#define GTF_permit_access (1U<<0)
114#define GTF_accept_transfer (2U<<0) 126#define GTF_accept_transfer (2U<<0)
127#define GTF_transitive (3U<<0)
115#define GTF_type_mask (3U<<0) 128#define GTF_type_mask (3U<<0)
116 129
117/* 130/*
@@ -119,6 +132,9 @@ struct grant_entry {
119 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] 132 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
120 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] 133 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
121 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] 134 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
135 * GTF_sub_page: Grant access to only a subrange of the page. @domid
136 * will only be allowed to copy from the grant, and not
137 * map it. [GST]
122 */ 138 */
123#define _GTF_readonly (2) 139#define _GTF_readonly (2)
124#define GTF_readonly (1U<<_GTF_readonly) 140#define GTF_readonly (1U<<_GTF_readonly)
@@ -126,6 +142,8 @@ struct grant_entry {
126#define GTF_reading (1U<<_GTF_reading) 142#define GTF_reading (1U<<_GTF_reading)
127#define _GTF_writing (4) 143#define _GTF_writing (4)
128#define GTF_writing (1U<<_GTF_writing) 144#define GTF_writing (1U<<_GTF_writing)
145#define _GTF_sub_page (8)
146#define GTF_sub_page (1U<<_GTF_sub_page)
129 147
130/* 148/*
131 * Subflags for GTF_accept_transfer: 149 * Subflags for GTF_accept_transfer:
@@ -142,15 +160,81 @@ struct grant_entry {
142#define _GTF_transfer_completed (3) 160#define _GTF_transfer_completed (3)
143#define GTF_transfer_completed (1U<<_GTF_transfer_completed) 161#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
144 162
163/*
164 * Version 2 grant table entries. These fulfil the same role as
165 * version 1 entries, but can represent more complicated operations.
166 * Any given domain will have either a version 1 or a version 2 table,
167 * and every entry in the table will be the same version.
168 *
169 * The interface by which domains use grant references does not depend
170 * on the grant table version in use by the other domain.
171 */
145 172
146/*********************************** 173/*
147 * GRANT TABLE QUERIES AND USES 174 * Version 1 and version 2 grant entries share a common prefix. The
175 * fields of the prefix are documented as part of struct
176 * grant_entry_v1.
148 */ 177 */
178struct grant_entry_header {
179 uint16_t flags;
180 domid_t domid;
181};
149 182
150/* 183/*
 151 * Reference to a grant entry in a specified domain's grant table. 184 * Version 2 of the grant entry structure; it is a union because three
 185 * different types are supported: full_page, sub_page and transitive.
186 */
187union grant_entry_v2 {
188 struct grant_entry_header hdr;
189
190 /*
191 * This member is used for V1-style full page grants, where either:
192 *
193 * -- hdr.type is GTF_accept_transfer, or
194 * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
195 *
196 * In that case, the frame field has the same semantics as the
197 * field of the same name in the V1 entry structure.
198 */
199 struct {
200 struct grant_entry_header hdr;
201 uint32_t pad0;
202 uint64_t frame;
203 } full_page;
204
205 /*
 206 * If the grant type is GTF_permit_access and GTF_sub_page is set,
207 * @domid is allowed to access bytes [@page_off,@page_off+@length)
208 * in frame @frame.
209 */
210 struct {
211 struct grant_entry_header hdr;
212 uint16_t page_off;
213 uint16_t length;
214 uint64_t frame;
215 } sub_page;
216
217 /*
218 * If the grant is GTF_transitive, @domid is allowed to use the
219 * grant @gref in domain @trans_domid, as if it was the local
220 * domain. Obviously, the transitive access must be compatible
221 * with the original grant.
222 */
223 struct {
224 struct grant_entry_header hdr;
225 domid_t trans_domid;
226 uint16_t pad0;
227 grant_ref_t gref;
228 } transitive;
229
230 uint32_t __spacer[4]; /* Pad to a power of two */
231};
232
233typedef uint16_t grant_status_t;
234
235/***********************************
236 * GRANT TABLE QUERIES AND USES
152 */ 237 */
153typedef uint32_t grant_ref_t;
154 238
155/* 239/*
156 * Handle to track a mapping created via a grant reference. 240 * Handle to track a mapping created via a grant reference.
@@ -322,6 +406,79 @@ struct gnttab_query_size {
322DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size); 406DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
323 407
324/* 408/*
409 * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
410 * tracked by <handle> but atomically replace the page table entry with one
411 * pointing to the machine address under <new_addr>. <new_addr> will be
412 * redirected to the null entry.
413 * NOTES:
414 * 1. The call may fail in an undefined manner if either mapping is not
415 * tracked by <handle>.
416 * 2. After executing a batch of unmaps, it is guaranteed that no stale
417 * mappings will remain in the device or host TLBs.
418 */
419#define GNTTABOP_unmap_and_replace 7
420struct gnttab_unmap_and_replace {
421 /* IN parameters. */
422 uint64_t host_addr;
423 uint64_t new_addr;
424 grant_handle_t handle;
425 /* OUT parameters. */
426 int16_t status; /* GNTST_* */
427};
428DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
429
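A hedged sketch of how a single unmap-and-replace might be issued through the existing HYPERVISOR_grant_table_op() wrapper; the caller, the origin of handle and the error policy are assumptions:

static int replace_grant_mapping(uint64_t host_addr, uint64_t new_addr,
				 grant_handle_t handle)
{
	struct gnttab_unmap_and_replace op = {
		.host_addr = host_addr,
		.new_addr  = new_addr,
		.handle    = handle,
	};

	/* One-element batch; a nonzero return means the hypercall itself failed. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &op, 1))
		return -EFAULT;
	return op.status == GNTST_okay ? 0 : -EINVAL;
}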
430/*
431 * GNTTABOP_set_version: Request a particular version of the grant
432 * table shared table structure. This operation can only be performed
433 * once in any given domain. It must be performed before any grants
434 * are activated; otherwise, the domain will be stuck with version 1.
435 * The only defined versions are 1 and 2.
436 */
437#define GNTTABOP_set_version 8
438struct gnttab_set_version {
439 /* IN parameters */
440 uint32_t version;
441};
442DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
443
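A minimal sketch of the negotiation described above, as a guest would perform it once during grant-table setup; falling back to version 1 on failure is an assumption about policy, not something the interface mandates:

static int select_grant_table_version(void)
{
	struct gnttab_set_version gsv = { .version = 2 };

	/* Must run before any grant is activated, otherwise the
	 * hypervisor refuses and the domain stays on version 1. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1) == 0)
		return 2;
	return 1;
}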
444/*
445 * GNTTABOP_get_status_frames: Get the list of frames used to store grant
446 * status for <dom>. In grant format version 2, the status is separated
447 * from the other shared grant fields to allow more efficient synchronization
448 * using barriers instead of atomic cmpexch operations.
449 * <nr_frames> specifies the size of vector <frame_list>.
450 * The frame addresses are returned in the <frame_list>.
451 * Only <nr_frames> addresses are returned, even if the table is larger.
452 * NOTES:
453 * 1. <dom> may be specified as DOMID_SELF.
454 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
455 */
456#define GNTTABOP_get_status_frames 9
457struct gnttab_get_status_frames {
458 /* IN parameters. */
459 uint32_t nr_frames;
460 domid_t dom;
461 /* OUT parameters. */
462 int16_t status; /* GNTST_* */
463 GUEST_HANDLE(uint64_t) frame_list;
464};
465DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
466
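A hedged sketch of querying the status frames for the local domain; the uint64_t buffer, its size and the error handling are assumptions, with set_xen_guest_handle() used to fill the frame_list handle:

static int query_status_frames(uint64_t *frames, uint32_t nr)
{
	struct gnttab_get_status_frames op = {
		.dom       = DOMID_SELF,
		.nr_frames = nr,
	};

	/* Hand the hypervisor the output buffer, then issue the op. */
	set_xen_guest_handle(op.frame_list, frames);
	if (HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames, &op, 1))
		return -EFAULT;
	return op.status == GNTST_okay ? 0 : -EINVAL;
}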
467/*
468 * GNTTABOP_get_version: Get the grant table version which is in
469 * effect for domain <dom>.
470 */
471#define GNTTABOP_get_version 10
472struct gnttab_get_version {
473 /* IN parameters */
474 domid_t dom;
475 uint16_t pad;
476 /* OUT parameters */
477 uint32_t version;
478};
479DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
480
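For completeness, a short sketch of the matching query; DOMID_SELF keeps it within the unprivileged case and the "unknown" fallback is an assumption:

static uint32_t query_local_grant_version(void)
{
	struct gnttab_get_version op = { .dom = DOMID_SELF };

	if (HYPERVISOR_grant_table_op(GNTTABOP_get_version, &op, 1))
		return 0;	/* treat hypercall failure as "unknown" */
	return op.version;
}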
481/*
325 * Bitfield values for update_pin_status.flags. 482 * Bitfield values for update_pin_status.flags.
326 */ 483 */
327 /* Map the grant entry for access by I/O devices. */ 484 /* Map the grant entry for access by I/O devices. */
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index f6f07aa35af5..7cdfca24eafb 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -87,4 +87,7 @@ struct xenstore_domain_interface {
87 XENSTORE_RING_IDX rsp_cons, rsp_prod; 87 XENSTORE_RING_IDX rsp_cons, rsp_prod;
88}; 88};
89 89
90/* Violating this is very bad. See docs/misc/xenstore.txt. */
91#define XENSTORE_PAYLOAD_MAX 4096
92
90#endif /* _XS_WIRE_H */ 93#endif /* _XS_WIRE_H */
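A minimal sketch of the kind of bounds check this constant enables on the receive side; struct xsd_sockmsg and its len field come from this header, while the helper itself is illustrative only:

static bool xs_payload_ok(const struct xsd_sockmsg *msg)
{
	/* Anything larger than the protocol maximum is malformed. */
	return msg->len <= XENSTORE_PAYLOAD_MAX;
}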
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 6a6e91449347..a890804945e3 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -523,6 +523,8 @@ struct tmem_op {
523 } u; 523 } u;
524}; 524};
525 525
526DEFINE_GUEST_HANDLE(u64);
527
526#else /* __ASSEMBLY__ */ 528#else /* __ASSEMBLY__ */
527 529
528/* In assembly code we cannot use C numeric constant suffixes. */ 530/* In assembly code we cannot use C numeric constant suffixes. */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b1b6676c1c43..e8c599b237c2 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -85,8 +85,6 @@ struct xenbus_device_id
85 85
86/* A xenbus driver. */ 86/* A xenbus driver. */
87struct xenbus_driver { 87struct xenbus_driver {
88 char *name;
89 struct module *owner;
90 const struct xenbus_device_id *ids; 88 const struct xenbus_device_id *ids;
91 int (*probe)(struct xenbus_device *dev, 89 int (*probe)(struct xenbus_device *dev,
92 const struct xenbus_device_id *id); 90 const struct xenbus_device_id *id);
@@ -101,31 +99,20 @@ struct xenbus_driver {
101 int (*is_ready)(struct xenbus_device *dev); 99 int (*is_ready)(struct xenbus_device *dev);
102}; 100};
103 101
104static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) 102#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \
105{ 103struct xenbus_driver var ## _driver = { \
106 return container_of(drv, struct xenbus_driver, driver); 104 .driver.name = drvname + 0 ?: var ## _ids->devicetype, \
105 .driver.owner = THIS_MODULE, \
106 .ids = var ## _ids, ## methods \
107} 107}
108 108
109int __must_check __xenbus_register_frontend(struct xenbus_driver *drv, 109static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
110 struct module *owner,
111 const char *mod_name);
112
113static inline int __must_check
114xenbus_register_frontend(struct xenbus_driver *drv)
115{ 110{
116 WARN_ON(drv->owner != THIS_MODULE); 111 return container_of(drv, struct xenbus_driver, driver);
117 return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
118} 112}
119 113
120int __must_check __xenbus_register_backend(struct xenbus_driver *drv, 114int __must_check xenbus_register_frontend(struct xenbus_driver *);
121 struct module *owner, 115int __must_check xenbus_register_backend(struct xenbus_driver *);
122 const char *mod_name);
123static inline int __must_check
124xenbus_register_backend(struct xenbus_driver *drv)
125{
126 WARN_ON(drv->owner != THIS_MODULE);
127 return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
128}
129 116
130void xenbus_unregister_driver(struct xenbus_driver *drv); 117void xenbus_unregister_driver(struct xenbus_driver *drv);
131 118
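A hedged sketch of how a frontend would be declared with the new macro and registered; the "examplefront" name, the ids table and the stub callbacks are placeholders, not code from this series:

#include <linux/module.h>
#include <xen/xenbus.h>

static int examplefront_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	return 0;	/* placeholder: a real driver sets up the frontend here */
}

static int examplefront_remove(struct xenbus_device *dev)
{
	return 0;
}

static const struct xenbus_device_id examplefront_ids[] = {
	{ "vexample" },
	{ "" }
};

/* Empty drvname: the macro falls back to the ids' devicetype for .driver.name. */
static DEFINE_XENBUS_DRIVER(examplefront, ,
	.probe = examplefront_probe,
	.remove = examplefront_remove,
);

static int __init examplefront_init(void)
{
	return xenbus_register_frontend(&examplefront_driver);
}
module_init(examplefront_init);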
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
new file mode 100644
index 000000000000..ac5f0fe47ed9
--- /dev/null
+++ b/include/xen/xenbus_dev.h
@@ -0,0 +1,41 @@
1/******************************************************************************
2 * xenbus_dev.h
3 *
4 * Interface to /dev/xen/xenbus_backend.
5 *
6 * Copyright (c) 2011 Bastian Blank <waldi@debian.org>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef __LINUX_XEN_XENBUS_DEV_H__
34#define __LINUX_XEN_XENBUS_DEV_H__
35
36#include <linux/ioctl.h>
37
38#define IOCTL_XENBUS_BACKEND_EVTCHN \
39 _IOC(_IOC_NONE, 'B', 0, 0)
40
41#endif /* __LINUX_XEN_XENBUS_DEV_H__ */
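A hedged userspace sketch of exercising the device node named in the comment above; the assumption (taken from the ioctl's name) is that it returns the xenstore event channel, and the constant is redefined locally rather than guessing an exported header path:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Mirrors IOCTL_XENBUS_BACKEND_EVTCHN from the header above. */
#define IOCTL_XENBUS_BACKEND_EVTCHN _IOC(_IOC_NONE, 'B', 0, 0)

int main(void)
{
	int fd = open("/dev/xen/xenbus_backend", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	int evtchn = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
	if (evtchn < 0)
		perror("ioctl");
	else
		printf("xenstore event channel: %d\n", evtchn);

	close(fd);
	return evtchn < 0;
}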