Diffstat (limited to 'drivers')

 drivers/Kconfig                    |   2
 drivers/block/xen-blkfront.c       |  23
 drivers/input/Kconfig              |   9
 drivers/input/Makefile             |   2
 drivers/input/xen-kbdfront.c       | 340
 drivers/net/xen-netfront.c         |   2
 drivers/video/Kconfig              |  14
 drivers/video/Makefile             |   1
 drivers/video/xen-fbfront.c        | 550
 drivers/xen/Kconfig                |  19
 drivers/xen/Makefile               |   4
 drivers/xen/balloon.c              | 712
 drivers/xen/events.c               | 674
 drivers/xen/features.c             |  29
 drivers/xen/grant-table.c          |  37
 drivers/xen/xenbus/xenbus_client.c |   6
 drivers/xen/xenbus/xenbus_probe.c  |  32
 drivers/xen/xencomm.c              | 232
 18 files changed, 2646 insertions(+), 42 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3a0e3549739f..80f0ec91e2cf 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -97,4 +97,6 @@ source "drivers/dca/Kconfig"
 source "drivers/auxdisplay/Kconfig"
 
 source "drivers/uio/Kconfig"
+
+source "drivers/xen/Kconfig"
 endmenu
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9c6f3f99208d..d771da816d95 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -47,6 +47,7 @@
 
 #include <xen/interface/grant_table.h>
 #include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
 
 #include <asm/xen/hypervisor.h>
 
@@ -74,7 +75,6 @@ static struct block_device_operations xlvbd_block_fops;
 struct blkfront_info
 {
         struct xenbus_device *xbdev;
-        dev_t dev;
         struct gendisk *gd;
         int vdevice;
         blkif_vdev_t handle;
@@ -88,6 +88,7 @@ struct blkfront_info
         struct blk_shadow shadow[BLK_RING_SIZE];
         unsigned long shadow_free;
         int feature_barrier;
+        int is_ready;
 
         /**
          * The number of people holding this device open. We won't allow a
@@ -614,6 +615,12 @@ again:
                 message = "writing event-channel";
                 goto abort_transaction;
         }
+        err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
+                            XEN_IO_PROTO_ABI_NATIVE);
+        if (err) {
+                message = "writing protocol";
+                goto abort_transaction;
+        }
 
         err = xenbus_transaction_end(xbt, 0);
         if (err) {
@@ -833,6 +840,8 @@ static void blkfront_connect(struct blkfront_info *info)
         spin_unlock_irq(&blkif_io_lock);
 
         add_disk(info->gd);
+
+        info->is_ready = 1;
 }
 
 /**
@@ -896,7 +905,7 @@ static void backend_changed(struct xenbus_device *dev,
                 break;
 
         case XenbusStateClosing:
-                bd = bdget(info->dev);
+                bd = bdget_disk(info->gd, 0);
                 if (bd == NULL)
                         xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
 
@@ -925,6 +934,13 @@ static int blkfront_remove(struct xenbus_device *dev)
         return 0;
 }
 
+static int blkfront_is_ready(struct xenbus_device *dev)
+{
+        struct blkfront_info *info = dev->dev.driver_data;
+
+        return info->is_ready;
+}
+
 static int blkif_open(struct inode *inode, struct file *filep)
 {
         struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
@@ -971,6 +987,7 @@ static struct xenbus_driver blkfront = {
         .remove = blkfront_remove,
         .resume = blkfront_resume,
         .otherend_changed = backend_changed,
+        .is_ready = blkfront_is_ready,
 };
 
 static int __init xlblk_init(void)
@@ -998,3 +1015,5 @@ module_exit(xlblk_exit);
 MODULE_DESCRIPTION("Xen virtual block device frontend");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
+MODULE_ALIAS("xen:vbd");
+MODULE_ALIAS("xenblk");
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 9dea14db724c..5f9d860925a1 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -149,6 +149,15 @@ config INPUT_APMPOWER
           To compile this driver as a module, choose M here: the
           module will be called apm-power.
 
+config XEN_KBDDEV_FRONTEND
+        tristate "Xen virtual keyboard and mouse support"
+        depends on XEN_FBDEV_FRONTEND
+        default y
+        help
+          This driver implements the front-end of the Xen virtual
+          keyboard and mouse device driver. It communicates with a back-end
+          in another domain.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 2ae87b19caa8..98c4f9a77876 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -23,3 +23,5 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
 obj-$(CONFIG_INPUT_MISC)        += misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)    += apm-power.o
+
+obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
new file mode 100644
index 000000000000..0f47f4697cdf
--- /dev/null
+++ b/drivers/input/xen-kbdfront.c
@@ -0,0 +1,340 @@
+/*
+ * Xen para-virtual input device
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ *
+ * Based on linux/drivers/input/mouse/sermouse.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+/*
+ * TODO:
+ *
+ * Switch to grant tables together with xen-fbfront.c.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/interface/io/fbif.h>
+#include <xen/interface/io/kbdif.h>
+#include <xen/xenbus.h>
+
+struct xenkbd_info {
+        struct input_dev *kbd;
+        struct input_dev *ptr;
+        struct xenkbd_page *page;
+        int irq;
+        struct xenbus_device *xbdev;
+        char phys[32];
+};
+
+static int xenkbd_remove(struct xenbus_device *);
+static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
+static void xenkbd_disconnect_backend(struct xenkbd_info *);
+
+/*
+ * Note: if you need to send out events, see xenfb_do_update() for how
+ * to do that.
+ */
+
+static irqreturn_t input_handler(int rq, void *dev_id)
+{
+        struct xenkbd_info *info = dev_id;
+        struct xenkbd_page *page = info->page;
+        __u32 cons, prod;
+
+        prod = page->in_prod;
+        if (prod == page->in_cons)
+                return IRQ_HANDLED;
+        rmb();                  /* ensure we see ring contents up to prod */
+        for (cons = page->in_cons; cons != prod; cons++) {
+                union xenkbd_in_event *event;
+                struct input_dev *dev;
+                event = &XENKBD_IN_RING_REF(page, cons);
+
+                dev = info->ptr;
+                switch (event->type) {
+                case XENKBD_TYPE_MOTION:
+                        input_report_rel(dev, REL_X, event->motion.rel_x);
+                        input_report_rel(dev, REL_Y, event->motion.rel_y);
+                        break;
+                case XENKBD_TYPE_KEY:
+                        dev = NULL;
+                        if (test_bit(event->key.keycode, info->kbd->keybit))
+                                dev = info->kbd;
+                        if (test_bit(event->key.keycode, info->ptr->keybit))
+                                dev = info->ptr;
+                        if (dev)
+                                input_report_key(dev, event->key.keycode,
+                                                 event->key.pressed);
+                        else
+                                printk(KERN_WARNING
+                                       "xenkbd: unhandled keycode 0x%x\n",
+                                       event->key.keycode);
+                        break;
+                case XENKBD_TYPE_POS:
+                        input_report_abs(dev, ABS_X, event->pos.abs_x);
+                        input_report_abs(dev, ABS_Y, event->pos.abs_y);
+                        break;
+                }
+                if (dev)
+                        input_sync(dev);
+        }
+        mb();                   /* ensure we got ring contents */
+        page->in_cons = cons;
+        notify_remote_via_irq(info->irq);
+
+        return IRQ_HANDLED;
+}
+
+static int __devinit xenkbd_probe(struct xenbus_device *dev,
+                                  const struct xenbus_device_id *id)
+{
+        int ret, i;
+        struct xenkbd_info *info;
+        struct input_dev *kbd, *ptr;
+
+        info = kzalloc(sizeof(*info), GFP_KERNEL);
+        if (!info) {
+                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+                return -ENOMEM;
+        }
+        dev->dev.driver_data = info;
+        info->xbdev = dev;
+        info->irq = -1;
+        snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
+
+        info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+        if (!info->page)
+                goto error_nomem;
+
+        /* keyboard */
+        kbd = input_allocate_device();
+        if (!kbd)
+                goto error_nomem;
+        kbd->name = "Xen Virtual Keyboard";
+        kbd->phys = info->phys;
+        kbd->id.bustype = BUS_PCI;
+        kbd->id.vendor = 0x5853;
+        kbd->id.product = 0xffff;
+        kbd->evbit[0] = BIT(EV_KEY);
+        for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
+                set_bit(i, kbd->keybit);
+        for (i = KEY_OK; i < KEY_MAX; i++)
+                set_bit(i, kbd->keybit);
+
+        ret = input_register_device(kbd);
+        if (ret) {
+                input_free_device(kbd);
+                xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
+                goto error;
+        }
+        info->kbd = kbd;
+
+        /* pointing device */
+        ptr = input_allocate_device();
+        if (!ptr)
+                goto error_nomem;
+        ptr->name = "Xen Virtual Pointer";
+        ptr->phys = info->phys;
+        ptr->id.bustype = BUS_PCI;
+        ptr->id.vendor = 0x5853;
+        ptr->id.product = 0xfffe;
+        ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
+        for (i = BTN_LEFT; i <= BTN_TASK; i++)
+                set_bit(i, ptr->keybit);
+        ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+        input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
+        input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
+
+        ret = input_register_device(ptr);
+        if (ret) {
+                input_free_device(ptr);
+                xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
+                goto error;
+        }
+        info->ptr = ptr;
+
+        ret = xenkbd_connect_backend(dev, info);
+        if (ret < 0)
+                goto error;
+
+        return 0;
+
+ error_nomem:
+        ret = -ENOMEM;
+        xenbus_dev_fatal(dev, ret, "allocating device memory");
+ error:
+        xenkbd_remove(dev);
+        return ret;
+}
+
+static int xenkbd_resume(struct xenbus_device *dev)
+{
+        struct xenkbd_info *info = dev->dev.driver_data;
+
+        xenkbd_disconnect_backend(info);
+        memset(info->page, 0, PAGE_SIZE);
+        return xenkbd_connect_backend(dev, info);
+}
+
+static int xenkbd_remove(struct xenbus_device *dev)
+{
+        struct xenkbd_info *info = dev->dev.driver_data;
+
+        xenkbd_disconnect_backend(info);
+        if (info->kbd)
+                input_unregister_device(info->kbd);
+        if (info->ptr)
+                input_unregister_device(info->ptr);
+        free_page((unsigned long)info->page);
+        kfree(info);
+        return 0;
+}
+
+static int xenkbd_connect_backend(struct xenbus_device *dev,
+                                  struct xenkbd_info *info)
+{
+        int ret, evtchn;
+        struct xenbus_transaction xbt;
+
+        ret = xenbus_alloc_evtchn(dev, &evtchn);
+        if (ret)
+                return ret;
+        ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
+                                        0, dev->devicetype, info);
+        if (ret < 0) {
+                xenbus_free_evtchn(dev, evtchn);
+                xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
+                return ret;
+        }
+        info->irq = ret;
+
+ again:
+        ret = xenbus_transaction_start(&xbt);
+        if (ret) {
+                xenbus_dev_fatal(dev, ret, "starting transaction");
+                return ret;
+        }
+        ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
+                            virt_to_mfn(info->page));
+        if (ret)
+                goto error_xenbus;
+        ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+                            evtchn);
+        if (ret)
+                goto error_xenbus;
+        ret = xenbus_transaction_end(xbt, 0);
+        if (ret) {
+                if (ret == -EAGAIN)
+                        goto again;
+                xenbus_dev_fatal(dev, ret, "completing transaction");
+                return ret;
+        }
+
+        xenbus_switch_state(dev, XenbusStateInitialised);
+        return 0;
+
+ error_xenbus:
+        xenbus_transaction_end(xbt, 1);
+        xenbus_dev_fatal(dev, ret, "writing xenstore");
+        return ret;
+}
+
+static void xenkbd_disconnect_backend(struct xenkbd_info *info)
+{
+        if (info->irq >= 0)
+                unbind_from_irqhandler(info->irq, info);
+        info->irq = -1;
+}
+
+static void xenkbd_backend_changed(struct xenbus_device *dev,
+                                   enum xenbus_state backend_state)
+{
+        struct xenkbd_info *info = dev->dev.driver_data;
+        int ret, val;
+
+        switch (backend_state) {
+        case XenbusStateInitialising:
+        case XenbusStateInitialised:
+        case XenbusStateUnknown:
+        case XenbusStateClosed:
+                break;
+
+        case XenbusStateInitWait:
+InitWait:
+                ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                   "feature-abs-pointer", "%d", &val);
+                if (ret < 0)
+                        val = 0;
+                if (val) {
+                        ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+                                            "request-abs-pointer", "1");
+                        if (ret)
+                                printk(KERN_WARNING
+                                       "xenkbd: can't request abs-pointer");
+                }
+                xenbus_switch_state(dev, XenbusStateConnected);
+                break;
+
+        case XenbusStateConnected:
+                /*
+                 * Work around xenbus race condition: If backend goes
+                 * through InitWait to Connected fast enough, we can
+                 * get Connected twice here.
+                 */
+                if (dev->state != XenbusStateConnected)
+                        goto InitWait; /* no InitWait seen yet, fudge it */
+                break;
+
+        case XenbusStateClosing:
+                xenbus_frontend_closed(dev);
+                break;
+        }
+}
+
+static struct xenbus_device_id xenkbd_ids[] = {
+        { "vkbd" },
+        { "" }
+};
+
+static struct xenbus_driver xenkbd = {
+        .name = "vkbd",
+        .owner = THIS_MODULE,
+        .ids = xenkbd_ids,
+        .probe = xenkbd_probe,
+        .remove = xenkbd_remove,
+        .resume = xenkbd_resume,
+        .otherend_changed = xenkbd_backend_changed,
+};
+
+static int __init xenkbd_init(void)
+{
+        if (!is_running_on_xen())
+                return -ENODEV;
+
+        /* Nothing to do if running in dom0. */
+        if (is_initial_xendomain())
+                return -ENODEV;
+
+        return xenbus_register_frontend(&xenkbd);
+}
+
+static void __exit xenkbd_cleanup(void)
+{
+        xenbus_unregister_driver(&xenkbd);
+}
+
+module_init(xenkbd_init);
+module_exit(xenkbd_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7483d45bc5bc..e62018a36133 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1809,3 +1809,5 @@ module_exit(netif_exit);
 
 MODULE_DESCRIPTION("Xen virtual network device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vif");
+MODULE_ALIAS("xennet");
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1bd5fb30237d..e3dc8f8d0c3e 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1930,6 +1930,20 @@ config FB_VIRTUAL
 
           If unsure, say N.
 
+config XEN_FBDEV_FRONTEND
+        tristate "Xen virtual frame buffer support"
+        depends on FB && XEN
+        select FB_SYS_FILLRECT
+        select FB_SYS_COPYAREA
+        select FB_SYS_IMAGEBLIT
+        select FB_SYS_FOPS
+        select FB_DEFERRED_IO
+        default y
+        help
+          This driver implements the front-end of the Xen virtual
+          frame buffer driver. It communicates with a back-end
+          in another domain.
+
 source "drivers/video/omap/Kconfig"
 
 source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 11c0e5e05f21..f172b9b73314 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -114,6 +114,7 @@ obj-$(CONFIG_FB_PS3) += ps3fb.o
 obj-$(CONFIG_FB_SM501)            += sm501fb.o
 obj-$(CONFIG_FB_XILINX)           += xilinxfb.o
 obj-$(CONFIG_FB_OMAP)             += omap/
+obj-$(CONFIG_XEN_FBDEV_FRONTEND)  += xen-fbfront.o
 
 # Platform or fallback drivers go here
 obj-$(CONFIG_FB_UVESA)            += uvesafb.o
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
new file mode 100644
index 000000000000..619a6f8d65a2
--- /dev/null
+++ b/drivers/video/xen-fbfront.c
@@ -0,0 +1,550 @@
+/*
+ * Xen para-virtual frame buffer device
+ *
+ * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ *
+ * Based on linux/drivers/video/q40fb.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+/*
+ * TODO:
+ *
+ * Switch to grant tables when they become capable of dealing with the
+ * frame buffer.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/interface/io/fbif.h>
+#include <xen/interface/io/protocols.h>
+#include <xen/xenbus.h>
+
+struct xenfb_info {
+        unsigned char *fb;
+        struct fb_info *fb_info;
+        int x1, y1, x2, y2;     /* dirty rectangle,
+                                   protected by dirty_lock */
+        spinlock_t dirty_lock;
+        int nr_pages;
+        int irq;
+        struct xenfb_page *page;
+        unsigned long *mfns;
+        int update_wanted;      /* XENFB_TYPE_UPDATE wanted */
+
+        struct xenbus_device *xbdev;
+};
+
+static u32 xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
+
+static int xenfb_remove(struct xenbus_device *);
+static void xenfb_init_shared_page(struct xenfb_info *);
+static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
+static void xenfb_disconnect_backend(struct xenfb_info *);
+
+static void xenfb_do_update(struct xenfb_info *info,
+                            int x, int y, int w, int h)
+{
+        union xenfb_out_event event;
+        u32 prod;
+
+        event.type = XENFB_TYPE_UPDATE;
+        event.update.x = x;
+        event.update.y = y;
+        event.update.width = w;
+        event.update.height = h;
+
+        prod = info->page->out_prod;
+        /* caller ensures !xenfb_queue_full() */
+        mb();                   /* ensure ring space available */
+        XENFB_OUT_RING_REF(info->page, prod) = event;
+        wmb();                  /* ensure ring contents visible */
+        info->page->out_prod = prod + 1;
+
+        notify_remote_via_irq(info->irq);
+}
+
+static int xenfb_queue_full(struct xenfb_info *info)
+{
+        u32 cons, prod;
+
+        prod = info->page->out_prod;
+        cons = info->page->out_cons;
+        return prod - cons == XENFB_OUT_RING_LEN;
+}
+
+static void xenfb_refresh(struct xenfb_info *info,
+                          int x1, int y1, int w, int h)
+{
+        unsigned long flags;
+        int y2 = y1 + h - 1;
+        int x2 = x1 + w - 1;
+
+        if (!info->update_wanted)
+                return;
+
+        spin_lock_irqsave(&info->dirty_lock, flags);
+
+        /* Combine with dirty rectangle: */
+        if (info->y1 < y1)
+                y1 = info->y1;
+        if (info->y2 > y2)
+                y2 = info->y2;
+        if (info->x1 < x1)
+                x1 = info->x1;
+        if (info->x2 > x2)
+                x2 = info->x2;
+
+        if (xenfb_queue_full(info)) {
+                /* Can't send right now, stash it in the dirty rectangle */
+                info->x1 = x1;
+                info->x2 = x2;
+                info->y1 = y1;
+                info->y2 = y2;
+                spin_unlock_irqrestore(&info->dirty_lock, flags);
+                return;
+        }
+
+        /* Clear dirty rectangle: */
+        info->x1 = info->y1 = INT_MAX;
+        info->x2 = info->y2 = 0;
+
+        spin_unlock_irqrestore(&info->dirty_lock, flags);
+
+        if (x1 <= x2 && y1 <= y2)
+                xenfb_do_update(info, x1, y1, x2 - x1 + 1, y2 - y1 + 1);
+}
+
+static void xenfb_deferred_io(struct fb_info *fb_info,
+                              struct list_head *pagelist)
+{
+        struct xenfb_info *info = fb_info->par;
+        struct page *page;
+        unsigned long beg, end;
+        int y1, y2, miny, maxy;
+
+        miny = INT_MAX;
+        maxy = 0;
+        list_for_each_entry(page, pagelist, lru) {
+                beg = page->index << PAGE_SHIFT;
+                end = beg + PAGE_SIZE - 1;
+                y1 = beg / fb_info->fix.line_length;
+                y2 = end / fb_info->fix.line_length;
+                if (y2 >= fb_info->var.yres)
+                        y2 = fb_info->var.yres - 1;
+                if (miny > y1)
+                        miny = y1;
+                if (maxy < y2)
+                        maxy = y2;
+        }
+        xenfb_refresh(info, 0, miny, fb_info->var.xres, maxy - miny + 1);
+}
+
+static struct fb_deferred_io xenfb_defio = {
+        .delay = HZ / 20,
+        .deferred_io = xenfb_deferred_io,
+};
+
+static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                           unsigned blue, unsigned transp,
+                           struct fb_info *info)
+{
+        u32 v;
+
+        if (regno > info->cmap.len)
+                return 1;
+
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
+        red = CNVT_TOHW(red, info->var.red.length);
+        green = CNVT_TOHW(green, info->var.green.length);
+        blue = CNVT_TOHW(blue, info->var.blue.length);
+        transp = CNVT_TOHW(transp, info->var.transp.length);
+#undef CNVT_TOHW
+
+        v = (red << info->var.red.offset) |
+            (green << info->var.green.offset) |
+            (blue << info->var.blue.offset);
+
+        switch (info->var.bits_per_pixel) {
+        case 16:
+        case 24:
+        case 32:
+                ((u32 *)info->pseudo_palette)[regno] = v;
+                break;
+        }
+
+        return 0;
+}
+
+static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
+{
+        struct xenfb_info *info = p->par;
+
+        sys_fillrect(p, rect);
+        xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
+{
+        struct xenfb_info *info = p->par;
+
+        sys_imageblit(p, image);
+        xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
+}
+
+static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+{
+        struct xenfb_info *info = p->par;
+
+        sys_copyarea(p, area);
+        xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
+}
+
+static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
+                           size_t count, loff_t *ppos)
+{
+        struct xenfb_info *info = p->par;
+        ssize_t res;
+
+        res = fb_sys_write(p, buf, count, ppos);
+        xenfb_refresh(info, 0, 0, info->page->width, info->page->height);
+        return res;
+}
+
+static struct fb_ops xenfb_fb_ops = {
+        .owner = THIS_MODULE,
+        .fb_read = fb_sys_read,
+        .fb_write = xenfb_write,
+        .fb_setcolreg = xenfb_setcolreg,
+        .fb_fillrect = xenfb_fillrect,
+        .fb_copyarea = xenfb_copyarea,
+        .fb_imageblit = xenfb_imageblit,
+};
+
+static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
+{
+        /*
+         * No in events recognized, simply ignore them all.
+         * If you need to recognize some, see xen-kbdfront's
+         * input_handler() for how to do that.
+         */
+        struct xenfb_info *info = dev_id;
+        struct xenfb_page *page = info->page;
+
+        if (page->in_cons != page->in_prod) {
+                info->page->in_cons = info->page->in_prod;
+                notify_remote_via_irq(info->irq);
+        }
+
+        /* Flush dirty rectangle: */
+        xenfb_refresh(info, INT_MAX, INT_MAX, -INT_MAX, -INT_MAX);
+
+        return IRQ_HANDLED;
+}
+
+static int __devinit xenfb_probe(struct xenbus_device *dev,
+                                 const struct xenbus_device_id *id)
+{
+        struct xenfb_info *info;
+        struct fb_info *fb_info;
+        int ret;
+
+        info = kzalloc(sizeof(*info), GFP_KERNEL);
+        if (info == NULL) {
+                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+                return -ENOMEM;
+        }
+        dev->dev.driver_data = info;
+        info->xbdev = dev;
+        info->irq = -1;
+        info->x1 = info->y1 = INT_MAX;
+        spin_lock_init(&info->dirty_lock);
+
+        info->fb = vmalloc(xenfb_mem_len);
+        if (info->fb == NULL)
+                goto error_nomem;
+        memset(info->fb, 0, xenfb_mem_len);
+
+        info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+        info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+        if (!info->mfns)
+                goto error_nomem;
+
+        /* set up shared page */
+        info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+        if (!info->page)
+                goto error_nomem;
+
+        xenfb_init_shared_page(info);
+
+        /* abusing framebuffer_alloc() to allocate pseudo_palette */
+        fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
+        if (fb_info == NULL)
+                goto error_nomem;
+
+        /* complete the abuse: */
+        fb_info->pseudo_palette = fb_info->par;
+        fb_info->par = info;
+
+        fb_info->screen_base = info->fb;
+
+        fb_info->fbops = &xenfb_fb_ops;
+        fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
+        fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
+        fb_info->var.bits_per_pixel = info->page->depth;
+
+        fb_info->var.red = (struct fb_bitfield){16, 8, 0};
+        fb_info->var.green = (struct fb_bitfield){8, 8, 0};
+        fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
+
+        fb_info->var.activate = FB_ACTIVATE_NOW;
+        fb_info->var.height = -1;
+        fb_info->var.width = -1;
+        fb_info->var.vmode = FB_VMODE_NONINTERLACED;
+
+        fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+        fb_info->fix.line_length = info->page->line_length;
+        fb_info->fix.smem_start = 0;
+        fb_info->fix.smem_len = xenfb_mem_len;
+        strcpy(fb_info->fix.id, "xen");
+        fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+        fb_info->fix.accel = FB_ACCEL_NONE;
+
+        fb_info->flags = FBINFO_FLAG_DEFAULT;
+
+        ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
+        if (ret < 0) {
+                framebuffer_release(fb_info);
+                xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
+                goto error;
+        }
+
+        fb_info->fbdefio = &xenfb_defio;
+        fb_deferred_io_init(fb_info);
+
+        ret = register_framebuffer(fb_info);
+        if (ret) {
+                fb_deferred_io_cleanup(fb_info);
+                fb_dealloc_cmap(&fb_info->cmap);
+                framebuffer_release(fb_info);
+                xenbus_dev_fatal(dev, ret, "register_framebuffer");
+                goto error;
+        }
+        info->fb_info = fb_info;
+
+        ret = xenfb_connect_backend(dev, info);
+        if (ret < 0)
+                goto error;
+
+        return 0;
+
+ error_nomem:
+        ret = -ENOMEM;
+        xenbus_dev_fatal(dev, ret, "allocating device memory");
+ error:
+        xenfb_remove(dev);
+        return ret;
+}
+
+static int xenfb_resume(struct xenbus_device *dev)
+{
+        struct xenfb_info *info = dev->dev.driver_data;
+
+        xenfb_disconnect_backend(info);
+        xenfb_init_shared_page(info);
+        return xenfb_connect_backend(dev, info);
+}
+
+static int xenfb_remove(struct xenbus_device *dev)
+{
+        struct xenfb_info *info = dev->dev.driver_data;
+
+        xenfb_disconnect_backend(info);
+        if (info->fb_info) {
+                fb_deferred_io_cleanup(info->fb_info);
+                unregister_framebuffer(info->fb_info);
+                fb_dealloc_cmap(&info->fb_info->cmap);
+                framebuffer_release(info->fb_info);
+        }
+        free_page((unsigned long)info->page);
+        vfree(info->mfns);
+        vfree(info->fb);
+        kfree(info);
+
+        return 0;
+}
+
+static unsigned long vmalloc_to_mfn(void *address)
+{
+        return pfn_to_mfn(vmalloc_to_pfn(address));
+}
+
+static void xenfb_init_shared_page(struct xenfb_info *info)
+{
+        int i;
+
+        for (i = 0; i < info->nr_pages; i++)
+                info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
+
+        info->page->pd[0] = vmalloc_to_mfn(info->mfns);
+        info->page->pd[1] = 0;
+        info->page->width = XENFB_WIDTH;
+        info->page->height = XENFB_HEIGHT;
+        info->page->depth = XENFB_DEPTH;
+        info->page->line_length = (info->page->depth / 8) * info->page->width;
+        info->page->mem_length = xenfb_mem_len;
+        info->page->in_cons = info->page->in_prod = 0;
+        info->page->out_cons = info->page->out_prod = 0;
+}
+
+static int xenfb_connect_backend(struct xenbus_device *dev,
+                                 struct xenfb_info *info)
+{
+        int ret, evtchn;
+        struct xenbus_transaction xbt;
+
+        ret = xenbus_alloc_evtchn(dev, &evtchn);
+        if (ret)
+                return ret;
+        ret = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
+                                        0, dev->devicetype, info);
+        if (ret < 0) {
+                xenbus_free_evtchn(dev, evtchn);
+                xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
+                return ret;
+        }
+        info->irq = ret;
+
+ again:
+        ret = xenbus_transaction_start(&xbt);
+        if (ret) {
+                xenbus_dev_fatal(dev, ret, "starting transaction");
+                return ret;
+        }
+        ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
+                            virt_to_mfn(info->page));
+        if (ret)
+                goto error_xenbus;
+        ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+                            evtchn);
+        if (ret)
+                goto error_xenbus;
+        ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
+                            XEN_IO_PROTO_ABI_NATIVE);
+        if (ret)
+                goto error_xenbus;
+        ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
+        if (ret)
+                goto error_xenbus;
+        ret = xenbus_transaction_end(xbt, 0);
+        if (ret) {
+                if (ret == -EAGAIN)
+                        goto again;
+                xenbus_dev_fatal(dev, ret, "completing transaction");
+                return ret;
+        }
+
+        xenbus_switch_state(dev, XenbusStateInitialised);
+        return 0;
+
+ error_xenbus:
+        xenbus_transaction_end(xbt, 1);
+        xenbus_dev_fatal(dev, ret, "writing xenstore");
+        return ret;
+}
+
+static void xenfb_disconnect_backend(struct xenfb_info *info)
+{
+        if (info->irq >= 0)
+                unbind_from_irqhandler(info->irq, info);
+        info->irq = -1;
+}
+
+static void xenfb_backend_changed(struct xenbus_device *dev,
+                                  enum xenbus_state backend_state)
+{
+        struct xenfb_info *info = dev->dev.driver_data;
+        int val;
+
+        switch (backend_state) {
+        case XenbusStateInitialising:
+        case XenbusStateInitialised:
+        case XenbusStateUnknown:
+        case XenbusStateClosed:
+                break;
+
+        case XenbusStateInitWait:
+InitWait:
+                xenbus_switch_state(dev, XenbusStateConnected);
+                break;
+
+        case XenbusStateConnected:
+                /*
+                 * Work around xenbus race condition: If backend goes
+                 * through InitWait to Connected fast enough, we can
+                 * get Connected twice here.
+                 */
+                if (dev->state != XenbusStateConnected)
+                        goto InitWait; /* no InitWait seen yet, fudge it */
+
+                if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                 "request-update", "%d", &val) < 0)
+                        val = 0;
+                if (val)
+                        info->update_wanted = 1;
+                break;
+
+        case XenbusStateClosing:
+                xenbus_frontend_closed(dev);
+                break;
+        }
+}
+
+static struct xenbus_device_id xenfb_ids[] = {
+        { "vfb" },
+        { "" }
+};
+
+static struct xenbus_driver xenfb = {
+        .name = "vfb",
+        .owner = THIS_MODULE,
+        .ids = xenfb_ids,
+        .probe = xenfb_probe,
+        .remove = xenfb_remove,
+        .resume = xenfb_resume,
+        .otherend_changed = xenfb_backend_changed,
+};
+
+static int __init xenfb_init(void)
+{
+        if (!is_running_on_xen())
+                return -ENODEV;
+
+        /* Nothing to do if running in dom0. */
+        if (is_initial_xendomain())
+                return -ENODEV;
+
+        return xenbus_register_frontend(&xenfb);
+}
+
+static void __exit xenfb_cleanup(void)
+{
+        xenbus_unregister_driver(&xenfb);
+}
+
+module_init(xenfb_init);
+module_exit(xenfb_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
new file mode 100644
index 000000000000..4b75a16de009
--- /dev/null
+++ b/drivers/xen/Kconfig
@@ -0,0 +1,19 @@
+config XEN_BALLOON
+        bool "Xen memory balloon driver"
+        depends on XEN
+        default y
+        help
+          The balloon driver allows the Xen domain to request more memory from
+          the system to expand the domain's memory allocation, or alternatively
+          return unneeded memory to the system.
+
+config XEN_SCRUB_PAGES
+        bool "Scrub pages before returning them to system"
+        depends on XEN_BALLOON
+        default y
+        help
+          Scrub pages before returning them to the system for reuse by
+          other domains. This makes sure that any confidential data
+          is not accidentally visible to other domains. It is more
+          secure, but slightly less efficient.
+          If in doubt, say yes.
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 56592f0d6cef..37af04f1ffd9 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,2 +1,4 @@
-obj-y   += grant-table.o
+obj-y   += grant-table.o features.o events.o
 obj-y   += xenbus/
+obj-$(CONFIG_XEN_XENCOMM)       += xencomm.o
+obj-$(CONFIG_XEN_BALLOON)       += balloon.o
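
balloon.c below watches the memory/target xenstore key, which is given in KiB, and watch_target() converts it to pages by shifting with PAGE_SHIFT - 10 (shifting by PAGE_SHIFT would convert bytes, and KiB are already 10 bits up). A quick standalone check of that arithmetic, assuming 4 KiB pages:

#include <assert.h>

int main(void)
{
        unsigned long long target_kib = 524288;  /* 512 MiB, as xenstore reports it */
        unsigned int page_shift = 12;            /* 4 KiB pages; the kernel's PAGE_SHIFT */

        /* >> PAGE_SHIFT turns bytes into pages; KiB input needs 10 bits less. */
        unsigned long pages = target_kib >> (page_shift - 10);

        assert(pages == 131072);                 /* 512 MiB / 4 KiB per page */
        return 0;
}
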
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
new file mode 100644
index 000000000000..ab25ba6cbbb9
--- /dev/null
+++ b/drivers/xen/balloon.c
@@ -0,0 +1,712 @@
+/******************************************************************************
+ * balloon.c
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
+#include <xen/interface/memory.h>
+#include <xen/balloon.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+
+#define BALLOON_CLASS_NAME "memory"
+
+struct balloon_stats {
+        /* We aim for 'current allocation' == 'target allocation'. */
+        unsigned long current_pages;
+        unsigned long target_pages;
+        /* We may hit the hard limit in Xen. If we do then we remember it. */
+        unsigned long hard_limit;
+        /*
+         * Drivers may alter the memory reservation independently, but they
+         * must inform the balloon driver so we avoid hitting the hard limit.
+         */
+        unsigned long driver_pages;
+        /* Number of pages in high- and low-memory balloons. */
+        unsigned long balloon_low;
+        unsigned long balloon_high;
+};
+
+static DEFINE_MUTEX(balloon_mutex);
+
+static struct sys_device balloon_sysdev;
+
+static int register_balloon(struct sys_device *sysdev);
+
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and driver_pages, and
+ * balloon lists.
+ */
+static DEFINE_SPINLOCK(balloon_lock);
+
+static struct balloon_stats balloon_stats;
+
+/* We increase/decrease in batches which fit in a page */
+static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
+
+/* VM /proc information for memory */
+extern unsigned long totalram_pages;
+
+#ifdef CONFIG_HIGHMEM
+extern unsigned long totalhigh_pages;
+#define inc_totalhigh_pages() (totalhigh_pages++)
+#define dec_totalhigh_pages() (totalhigh_pages--)
+#else
+#define inc_totalhigh_pages() do {} while(0)
+#define dec_totalhigh_pages() do {} while(0)
+#endif
+
+/* List of ballooned pages, threaded through the mem_map array. */
+static LIST_HEAD(ballooned_pages);
+
+/* Main work function, always executed in process context. */
+static void balloon_process(struct work_struct *work);
+static DECLARE_WORK(balloon_worker, balloon_process);
+static struct timer_list balloon_timer;
+
+/* When ballooning out (allocating memory to return to Xen) we don't really
+   want the kernel to try too hard since that can trigger the oom killer. */
+#define GFP_BALLOON \
+        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
+
+static void scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+        if (PageHighMem(page)) {
+                void *v = kmap(page);
+                clear_page(v);
+                kunmap(v);
+        } else {
+                void *v = page_address(page);
+                clear_page(v);
+        }
+#endif
+}
+
+/* balloon_append: add the given page to the balloon. */
+static void balloon_append(struct page *page)
+{
+        /* Lowmem is re-populated first, so highmem pages go at list tail. */
+        if (PageHighMem(page)) {
+                list_add_tail(&page->lru, &ballooned_pages);
+                balloon_stats.balloon_high++;
+                dec_totalhigh_pages();
+        } else {
+                list_add(&page->lru, &ballooned_pages);
+                balloon_stats.balloon_low++;
+        }
+}
+
+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
+static struct page *balloon_retrieve(void)
+{
+        struct page *page;
+
+        if (list_empty(&ballooned_pages))
+                return NULL;
+
+        page = list_entry(ballooned_pages.next, struct page, lru);
+        list_del(&page->lru);
+
+        if (PageHighMem(page)) {
+                balloon_stats.balloon_high--;
+                inc_totalhigh_pages();
+        }
+        else
+                balloon_stats.balloon_low--;
+
+        return page;
+}
+
+static struct page *balloon_first_page(void)
+{
+        if (list_empty(&ballooned_pages))
+                return NULL;
+        return list_entry(ballooned_pages.next, struct page, lru);
+}
+
+static struct page *balloon_next_page(struct page *page)
+{
+        struct list_head *next = page->lru.next;
+        if (next == &ballooned_pages)
+                return NULL;
+        return list_entry(next, struct page, lru);
+}
+
+static void balloon_alarm(unsigned long unused)
+{
+        schedule_work(&balloon_worker);
+}
+
+static unsigned long current_target(void)
+{
+        unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);
+
+        target = min(target,
+                     balloon_stats.current_pages +
+                     balloon_stats.balloon_low +
+                     balloon_stats.balloon_high);
+
+        return target;
+}
+
+static int increase_reservation(unsigned long nr_pages)
+{
+        unsigned long pfn, i, flags;
+        struct page *page;
+        long rc;
+        struct xen_memory_reservation reservation = {
+                .address_bits = 0,
+                .extent_order = 0,
+                .domid = DOMID_SELF
+        };
+
+        if (nr_pages > ARRAY_SIZE(frame_list))
+                nr_pages = ARRAY_SIZE(frame_list);
+
+        spin_lock_irqsave(&balloon_lock, flags);
+
+        page = balloon_first_page();
+        for (i = 0; i < nr_pages; i++) {
+                BUG_ON(page == NULL);
+                frame_list[i] = page_to_pfn(page);
+                page = balloon_next_page(page);
+        }
+
+        reservation.extent_start = (unsigned long)frame_list;
+        reservation.nr_extents = nr_pages;
+        rc = HYPERVISOR_memory_op(
+                XENMEM_populate_physmap, &reservation);
+        if (rc < nr_pages) {
+                if (rc > 0) {
+                        int ret;
+
+                        /* We hit the Xen hard limit: reprobe. */
+                        reservation.nr_extents = rc;
+                        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                                   &reservation);
+                        BUG_ON(ret != rc);
+                }
+                if (rc >= 0)
+                        balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
+                                                    balloon_stats.driver_pages);
+                goto out;
+        }
+
+        for (i = 0; i < nr_pages; i++) {
+                page = balloon_retrieve();
+                BUG_ON(page == NULL);
+
+                pfn = page_to_pfn(page);
+                BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
+                       phys_to_machine_mapping_valid(pfn));
+
+                set_phys_to_machine(pfn, frame_list[i]);
+
+                /* Link back into the page tables if not highmem. */
+                if (pfn < max_low_pfn) {
+                        int ret;
+                        ret = HYPERVISOR_update_va_mapping(
+                                (unsigned long)__va(pfn << PAGE_SHIFT),
+                                mfn_pte(frame_list[i], PAGE_KERNEL),
+                                0);
+                        BUG_ON(ret);
+                }
+
+                /* Relinquish the page back to the allocator. */
+                ClearPageReserved(page);
+                init_page_count(page);
+                __free_page(page);
+        }
+
+        balloon_stats.current_pages += nr_pages;
+        totalram_pages = balloon_stats.current_pages;
+
+ out:
+        spin_unlock_irqrestore(&balloon_lock, flags);
+
+        return 0;
+}
+
+static int decrease_reservation(unsigned long nr_pages)
+{
+        unsigned long pfn, i, flags;
+        struct page *page;
+        int need_sleep = 0;
+        int ret;
+        struct xen_memory_reservation reservation = {
+                .address_bits = 0,
+                .extent_order = 0,
+                .domid = DOMID_SELF
+        };
+
+        if (nr_pages > ARRAY_SIZE(frame_list))
+                nr_pages = ARRAY_SIZE(frame_list);
+
+        for (i = 0; i < nr_pages; i++) {
+                if ((page = alloc_page(GFP_BALLOON)) == NULL) {
+                        nr_pages = i;
+                        need_sleep = 1;
+                        break;
+                }
+
+                pfn = page_to_pfn(page);
+                frame_list[i] = pfn_to_mfn(pfn);
+
+                scrub_page(page);
+        }
+
+        /* Ensure that ballooned highmem pages don't have kmaps. */
+        kmap_flush_unused();
+        flush_tlb_all();
+
+        spin_lock_irqsave(&balloon_lock, flags);
+
+        /* No more mappings: invalidate P2M and add to balloon. */
+        for (i = 0; i < nr_pages; i++) {
+                pfn = mfn_to_pfn(frame_list[i]);
+                set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+                balloon_append(pfn_to_page(pfn));
+        }
+
+        reservation.extent_start = (unsigned long)frame_list;
+        reservation.nr_extents = nr_pages;
+        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+        BUG_ON(ret != nr_pages);
+
+        balloon_stats.current_pages -= nr_pages;
+        totalram_pages = balloon_stats.current_pages;
+
+        spin_unlock_irqrestore(&balloon_lock, flags);
+
+        return need_sleep;
+}
+
+/*
+ * We avoid multiple worker processes conflicting via the balloon mutex.
+ * We may of course race updates of the target counts (which are protected
+ * by the balloon lock), or with changes to the Xen hard limit, but we will
+ * recover from these in time.
+ */
+static void balloon_process(struct work_struct *work)
+{
+        int need_sleep = 0;
+        long credit;
+
+        mutex_lock(&balloon_mutex);
+
+        do {
+                credit = current_target() - balloon_stats.current_pages;
+                if (credit > 0)
+                        need_sleep = (increase_reservation(credit) != 0);
+                if (credit < 0)
+                        need_sleep = (decrease_reservation(-credit) != 0);
+
+#ifndef CONFIG_PREEMPT
+                if (need_resched())
+                        schedule();
+#endif
+        } while ((credit != 0) && !need_sleep);
+
+        /* Schedule more work if there is some still to be done. */
+        if (current_target() != balloon_stats.current_pages)
+                mod_timer(&balloon_timer, jiffies + HZ);
+
+        mutex_unlock(&balloon_mutex);
+}
+
+/* Resets the Xen limit, sets new target, and kicks off processing. */
+void balloon_set_new_target(unsigned long target)
+{
+        /* No need for lock. Not read-modify-write updates. */
+        balloon_stats.hard_limit = ~0UL;
+        balloon_stats.target_pages = target;
+        schedule_work(&balloon_worker);
+}
+
+static struct xenbus_watch target_watch =
+{
+        .node = "memory/target"
+};
+
+/* React to a change in the target key */
+static void watch_target(struct xenbus_watch *watch,
+                         const char **vec, unsigned int len)
+{
+        unsigned long long new_target;
+        int err;
+
+        err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
+        if (err != 1) {
+                /* This is ok (for domain0 at least) - so just return */
+                return;
+        }
+
+        /* The given memory/target value is in KiB, so it needs converting to
+         * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
+         */
+        balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+}
+
+static int balloon_init_watcher(struct notifier_block *notifier,
+                                unsigned long event,
+                                void *data)
+{
+        int err;
+
+        err = register_xenbus_watch(&target_watch);
+        if (err)
+                printk(KERN_ERR "Failed to set balloon watcher\n");
+
+        return NOTIFY_DONE;
+}
+
+static struct notifier_block xenstore_notifier;
+
+static int __init balloon_init(void)
+{
+        unsigned long pfn;
+        struct page *page;
+
+        if (!is_running_on_xen())
+                return -ENODEV;
+
+        pr_info("xen_balloon: Initialising balloon driver.\n");
+
+        balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+        totalram_pages = balloon_stats.current_pages;
+        balloon_stats.target_pages = balloon_stats.current_pages;
+        balloon_stats.balloon_low = 0;
+        balloon_stats.balloon_high = 0;
+        balloon_stats.driver_pages = 0UL;
+        balloon_stats.hard_limit = ~0UL;
+
+        init_timer(&balloon_timer);
+        balloon_timer.data = 0;
+        balloon_timer.function = balloon_alarm;
+
+        register_balloon(&balloon_sysdev);
+
+        /* Initialise the balloon with excess memory space. */
+        for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
+                page = pfn_to_page(pfn);
+                if (!PageReserved(page))
+                        balloon_append(page);
+        }
+
+        target_watch.callback = watch_target;
+        xenstore_notifier.notifier_call = balloon_init_watcher;
+
+        register_xenstore_notifier(&xenstore_notifier);
+
+        return 0;
+}
+
+subsys_initcall(balloon_init);
+
+static void balloon_exit(void)
+{
+        /* XXX - release balloon here */
+        return;
+}
+
+module_exit(balloon_exit);
+
+static void balloon_update_driver_allowance(long delta)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&balloon_lock, flags);
+        balloon_stats.driver_pages += delta;
+        spin_unlock_irqrestore(&balloon_lock, flags);
+}
+
+static int dealloc_pte_fn(
+        pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+        unsigned long mfn = pte_mfn(*pte);
+        int ret;
+        struct xen_memory_reservation reservation = {
+                .nr_extents = 1,
+                .extent_order = 0,
+                .domid = DOMID_SELF
+        };
+        reservation.extent_start = (unsigned long)&mfn;
+        set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
+        set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+        BUG_ON(ret != 1);
+        return 0;
+}
+
+static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
+{
+        unsigned long vaddr, flags;
+        struct page *page, **pagevec;
+        int i, ret;
+
+        pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
+        if (pagevec == NULL)
+                return NULL;
+
+        for (i = 0; i < nr_pages; i++) {
+                page = pagevec[i] = alloc_page(GFP_KERNEL);
+                if (page == NULL)
+                        goto err;
+
+                vaddr = (unsigned long)page_address(page);
+
+                scrub_page(page);
+
+                spin_lock_irqsave(&balloon_lock, flags);
+
+                if (xen_feature(XENFEAT_auto_translated_physmap)) {
+                        unsigned long gmfn = page_to_pfn(page);
+                        struct xen_memory_reservation reservation = {
+                                .nr_extents = 1,
+                                .extent_order = 0,
+                                .domid = DOMID_SELF
+                        };
+                        reservation.extent_start = (unsigned long)&gmfn;
+                        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                                   &reservation);
+                        if (ret == 1)
+                                ret = 0; /* success */
+                } else {
+                        ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
+                                                  dealloc_pte_fn, NULL);
+                }
+
+                if (ret != 0) {
+                        spin_unlock_irqrestore(&balloon_lock, flags);
533 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
534 | __free_page(page); | ||
535 | goto err; | ||
536 | } | ||
537 | |||
538 | totalram_pages = --balloon_stats.current_pages; | ||
539 | |||
540 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
541 | } | ||
542 | |||
543 | out: | ||
544 | schedule_work(&balloon_worker); | ||
545 | flush_tlb_all(); | ||
546 | return pagevec; | ||
547 | |||
548 | err: | ||
549 | spin_lock_irqsave(&balloon_lock, flags); | ||
550 | while (--i >= 0) | ||
551 | balloon_append(pagevec[i]); | ||
552 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
553 | kfree(pagevec); | ||
554 | pagevec = NULL; | ||
555 | goto out; | ||
556 | } | ||
557 | |||
558 | static void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) | ||
559 | { | ||
560 | unsigned long flags; | ||
561 | int i; | ||
562 | |||
563 | if (pagevec == NULL) | ||
564 | return; | ||
565 | |||
566 | spin_lock_irqsave(&balloon_lock, flags); | ||
567 | for (i = 0; i < nr_pages; i++) { | ||
568 | BUG_ON(page_count(pagevec[i]) != 1); | ||
569 | balloon_append(pagevec[i]); | ||
570 | } | ||
571 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
572 | |||
573 | kfree(pagevec); | ||
574 | |||
575 | schedule_work(&balloon_worker); | ||
576 | } | ||
577 | |||
578 | static void balloon_release_driver_page(struct page *page) | ||
579 | { | ||
580 | unsigned long flags; | ||
581 | |||
582 | spin_lock_irqsave(&balloon_lock, flags); | ||
583 | balloon_append(page); | ||
584 | balloon_stats.driver_pages--; | ||
585 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
586 | |||
587 | schedule_work(&balloon_worker); | ||
588 | } | ||
589 | |||
590 | |||
591 | #define BALLOON_SHOW(name, format, args...) \ | ||
592 | static ssize_t show_##name(struct sys_device *dev, \ | ||
593 | char *buf) \ | ||
594 | { \ | ||
595 | return sprintf(buf, format, ##args); \ | ||
596 | } \ | ||
597 | static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) | ||
598 | |||
599 | BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); | ||
600 | BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); | ||
601 | BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); | ||
602 | BALLOON_SHOW(hard_limit_kb, | ||
603 | (balloon_stats.hard_limit != ~0UL) ? "%lu\n" : "???\n", | ||
604 | (balloon_stats.hard_limit != ~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0); | ||
605 | BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); | ||
606 | |||
607 | static ssize_t show_target_kb(struct sys_device *dev, char *buf) | ||
608 | { | ||
609 | return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages)); | ||
610 | } | ||
611 | |||
612 | static ssize_t store_target_kb(struct sys_device *dev, | ||
613 | const char *buf, | ||
614 | size_t count) | ||
615 | { | ||
616 | char memstring[64], *endchar; | ||
617 | unsigned long long target_bytes; | ||
618 | |||
619 | if (!capable(CAP_SYS_ADMIN)) | ||
620 | return -EPERM; | ||
621 | |||
622 | if (count <= 1) | ||
623 | return -EBADMSG; /* runt */ | ||
624 | if (count >= sizeof(memstring)) | ||
625 | return -EFBIG; /* too long: need room for '\0' */ | ||
626 | strcpy(memstring, buf); | ||
627 | |||
628 | target_bytes = memparse(memstring, &endchar); | ||
629 | balloon_set_new_target(target_bytes >> PAGE_SHIFT); | ||
630 | |||
631 | return count; | ||
632 | } | ||
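
Note that memparse() accepts the usual K/M/G suffixes, so the sysfs write may carry a unit; a worked instance (hypothetical input, 4 KiB pages):

    /* a write of "1G" parses to 1073741824 bytes;
     * 1073741824 >> PAGE_SHIFT == 262144 pages (with PAGE_SHIFT == 12) */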
633 | |||
634 | static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, | ||
635 | show_target_kb, store_target_kb); | ||
636 | |||
637 | static struct sysdev_attribute *balloon_attrs[] = { | ||
638 | &attr_target_kb, | ||
639 | }; | ||
640 | |||
641 | static struct attribute *balloon_info_attrs[] = { | ||
642 | &attr_current_kb.attr, | ||
643 | &attr_low_kb.attr, | ||
644 | &attr_high_kb.attr, | ||
645 | &attr_hard_limit_kb.attr, | ||
646 | &attr_driver_kb.attr, | ||
647 | NULL | ||
648 | }; | ||
649 | |||
650 | static struct attribute_group balloon_info_group = { | ||
651 | .name = "info", | ||
652 | .attrs = balloon_info_attrs, | ||
653 | }; | ||
654 | |||
655 | static struct sysdev_class balloon_sysdev_class = { | ||
656 | .name = BALLOON_CLASS_NAME, | ||
657 | }; | ||
658 | |||
659 | static int register_balloon(struct sys_device *sysdev) | ||
660 | { | ||
661 | int i, error; | ||
662 | |||
663 | error = sysdev_class_register(&balloon_sysdev_class); | ||
664 | if (error) | ||
665 | return error; | ||
666 | |||
667 | sysdev->id = 0; | ||
668 | sysdev->cls = &balloon_sysdev_class; | ||
669 | |||
670 | error = sysdev_register(sysdev); | ||
671 | if (error) { | ||
672 | sysdev_class_unregister(&balloon_sysdev_class); | ||
673 | return error; | ||
674 | } | ||
675 | |||
676 | for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { | ||
677 | error = sysdev_create_file(sysdev, balloon_attrs[i]); | ||
678 | if (error) | ||
679 | goto fail; | ||
680 | } | ||
681 | |||
682 | error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); | ||
683 | if (error) | ||
684 | goto fail; | ||
685 | |||
686 | return 0; | ||
687 | |||
688 | fail: | ||
689 | while (--i >= 0) | ||
690 | sysdev_remove_file(sysdev, balloon_attrs[i]); | ||
691 | sysdev_unregister(sysdev); | ||
692 | sysdev_class_unregister(&balloon_sysdev_class); | ||
693 | return error; | ||
694 | } | ||
695 | |||
696 | static void unregister_balloon(struct sys_device *sysdev) | ||
697 | { | ||
698 | int i; | ||
699 | |||
700 | sysfs_remove_group(&sysdev->kobj, &balloon_info_group); | ||
701 | for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) | ||
702 | sysdev_remove_file(sysdev, balloon_attrs[i]); | ||
703 | sysdev_unregister(sysdev); | ||
704 | sysdev_class_unregister(&balloon_sysdev_class); | ||
705 | } | ||
706 | |||
707 | static void balloon_sysfs_exit(void) | ||
708 | { | ||
709 | unregister_balloon(&balloon_sysdev); | ||
710 | } | ||
711 | |||
712 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c new file mode 100644 index 000000000000..4f0f22b020ea --- /dev/null +++ b/drivers/xen/events.c | |||
@@ -0,0 +1,674 @@ | |||
1 | /* | ||
2 | * Xen event channels | ||
3 | * | ||
4 | * Xen models interrupts with abstract event channels. Because each | ||
5 | * domain gets 1024 event channels, but NR_IRQS is not that large, we | ||
6 | * must dynamically map irqs<->event channels. The event channels | ||
7 | * interface with the rest of the kernel by defining a xen interrupt | ||
8 | * chip. When an event is received, it is mapped to an irq and sent | ||
9 | * through the normal interrupt processing path. | ||
10 | * | ||
11 | * There are four kinds of events which can be mapped to an event | ||
12 | * channel: | ||
13 | * | ||
14 | * 1. Inter-domain notifications. This includes all the virtual | ||
15 | * device events, since they're driven by front-ends in another domain | ||
16 | * (typically dom0). | ||
17 | * 2. VIRQs, typically used for timers. These are per-cpu events. | ||
18 | * 3. IPIs. | ||
19 | * 4. Hardware interrupts. Not supported at present. | ||
20 | * | ||
21 | * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 | ||
22 | */ | ||
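
As a usage sketch of this interface from a frontend's point of view (hypothetical handler and device names, assuming <linux/interrupt.h> and <xen/events.h>; the helpers referenced are defined later in this file):

    static irqreturn_t demo_interrupt(int irq, void *dev_id)
    {
            /* drain the shared ring, then acknowledge */
            return IRQ_HANDLED;
    }

    static int demo_connect(unsigned int evtchn, void *demo_dev)
    {
            /* map the event channel onto a dynamic irq, install handler */
            int irq = bind_evtchn_to_irqhandler(evtchn, demo_interrupt,
                                                0, "demo", demo_dev);
            if (irq < 0)
                    return irq;
            notify_remote_via_irq(irq);     /* kick the backend once */
            return irq;
    }

Teardown is the mirror image: unbind_from_irqhandler(irq, demo_dev) removes the handler and closes the binding.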
23 | |||
24 | #include <linux/linkage.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/string.h> | ||
29 | |||
30 | #include <asm/ptrace.h> | ||
31 | #include <asm/irq.h> | ||
32 | #include <asm/sync_bitops.h> | ||
33 | #include <asm/xen/hypercall.h> | ||
34 | #include <asm/xen/hypervisor.h> | ||
35 | |||
36 | #include <xen/xen-ops.h> | ||
37 | #include <xen/events.h> | ||
38 | #include <xen/interface/xen.h> | ||
39 | #include <xen/interface/event_channel.h> | ||
40 | |||
41 | /* | ||
42 | * This lock protects updates to the following mapping and reference-count | ||
43 | * arrays. The lock does not need to be acquired to read the mapping tables. | ||
44 | */ | ||
45 | static DEFINE_SPINLOCK(irq_mapping_update_lock); | ||
46 | |||
47 | /* IRQ <-> VIRQ mapping. */ | ||
48 | static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; | ||
49 | |||
50 | /* IRQ <-> IPI mapping */ | ||
51 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; | ||
52 | |||
53 | /* Packed IRQ information: binding type, sub-type index, and event channel. */ | ||
54 | struct packed_irq | ||
55 | { | ||
56 | unsigned short evtchn; | ||
57 | unsigned char index; | ||
58 | unsigned char type; | ||
59 | }; | ||
60 | |||
61 | static struct packed_irq irq_info[NR_IRQS]; | ||
62 | |||
63 | /* Binding types. */ | ||
64 | enum { | ||
65 | IRQT_UNBOUND, | ||
66 | IRQT_PIRQ, | ||
67 | IRQT_VIRQ, | ||
68 | IRQT_IPI, | ||
69 | IRQT_EVTCHN | ||
70 | }; | ||
71 | |||
72 | /* Convenient shorthand for packed representation of an unbound IRQ. */ | ||
73 | #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0) | ||
74 | |||
75 | static int evtchn_to_irq[NR_EVENT_CHANNELS] = { | ||
76 | [0 ... NR_EVENT_CHANNELS-1] = -1 | ||
77 | }; | ||
78 | static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; | ||
79 | static u8 cpu_evtchn[NR_EVENT_CHANNELS]; | ||
80 | |||
81 | /* Reference counts for bindings to IRQs. */ | ||
82 | static int irq_bindcount[NR_IRQS]; | ||
83 | |||
84 | /* Xen will never allocate port zero for any purpose. */ | ||
85 | #define VALID_EVTCHN(chn) ((chn) != 0) | ||
86 | |||
87 | /* | ||
88 | * Force a proper event-channel callback from Xen after clearing the | ||
89 | * callback mask. We do this in a very simple manner, by making a call | ||
90 | * down into Xen. The pending flag will be checked by Xen on return. | ||
91 | */ | ||
92 | void force_evtchn_callback(void) | ||
93 | { | ||
94 | (void)HYPERVISOR_xen_version(0, NULL); | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(force_evtchn_callback); | ||
97 | |||
98 | static struct irq_chip xen_dynamic_chip; | ||
99 | |||
100 | /* Constructor for packed IRQ information. */ | ||
101 | static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn) | ||
102 | { | ||
103 | return (struct packed_irq) { evtchn, index, type }; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Accessors for packed IRQ information. | ||
108 | */ | ||
109 | static inline unsigned int evtchn_from_irq(int irq) | ||
110 | { | ||
111 | return irq_info[irq].evtchn; | ||
112 | } | ||
113 | |||
114 | static inline unsigned int index_from_irq(int irq) | ||
115 | { | ||
116 | return irq_info[irq].index; | ||
117 | } | ||
118 | |||
119 | static inline unsigned int type_from_irq(int irq) | ||
120 | { | ||
121 | return irq_info[irq].type; | ||
122 | } | ||
123 | |||
124 | static inline unsigned long active_evtchns(unsigned int cpu, | ||
125 | struct shared_info *sh, | ||
126 | unsigned int idx) | ||
127 | { | ||
128 | return (sh->evtchn_pending[idx] & | ||
129 | cpu_evtchn_mask[cpu][idx] & | ||
130 | ~sh->evtchn_mask[idx]); | ||
131 | } | ||
132 | |||
133 | static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | ||
134 | { | ||
135 | int irq = evtchn_to_irq[chn]; | ||
136 | |||
137 | BUG_ON(irq == -1); | ||
138 | #ifdef CONFIG_SMP | ||
139 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); | ||
140 | #endif | ||
141 | |||
142 | __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]); | ||
143 | __set_bit(chn, cpu_evtchn_mask[cpu]); | ||
144 | |||
145 | cpu_evtchn[chn] = cpu; | ||
146 | } | ||
147 | |||
148 | static void init_evtchn_cpu_bindings(void) | ||
149 | { | ||
150 | #ifdef CONFIG_SMP | ||
151 | int i; | ||
152 | /* By default all event channels notify CPU#0. */ | ||
153 | for (i = 0; i < NR_IRQS; i++) | ||
154 | irq_desc[i].affinity = cpumask_of_cpu(0); | ||
155 | #endif | ||
156 | |||
157 | memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); | ||
158 | memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); | ||
159 | } | ||
160 | |||
161 | static inline unsigned int cpu_from_evtchn(unsigned int evtchn) | ||
162 | { | ||
163 | return cpu_evtchn[evtchn]; | ||
164 | } | ||
165 | |||
166 | static inline void clear_evtchn(int port) | ||
167 | { | ||
168 | struct shared_info *s = HYPERVISOR_shared_info; | ||
169 | sync_clear_bit(port, &s->evtchn_pending[0]); | ||
170 | } | ||
171 | |||
172 | static inline void set_evtchn(int port) | ||
173 | { | ||
174 | struct shared_info *s = HYPERVISOR_shared_info; | ||
175 | sync_set_bit(port, &s->evtchn_pending[0]); | ||
176 | } | ||
177 | |||
178 | |||
179 | /** | ||
180 | * notify_remote_via_irq - send event to remote end of event channel via irq | ||
181 | * @irq: irq of event channel to send event to | ||
182 | * | ||
183 | * Unlike notify_remote_via_evtchn(), this is safe to use across | ||
184 | * save/restore. Notifications on a broken connection are silently | ||
185 | * dropped. | ||
186 | */ | ||
187 | void notify_remote_via_irq(int irq) | ||
188 | { | ||
189 | int evtchn = evtchn_from_irq(irq); | ||
190 | |||
191 | if (VALID_EVTCHN(evtchn)) | ||
192 | notify_remote_via_evtchn(evtchn); | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(notify_remote_via_irq); | ||
195 | |||
196 | static void mask_evtchn(int port) | ||
197 | { | ||
198 | struct shared_info *s = HYPERVISOR_shared_info; | ||
199 | sync_set_bit(port, &s->evtchn_mask[0]); | ||
200 | } | ||
201 | |||
202 | static void unmask_evtchn(int port) | ||
203 | { | ||
204 | struct shared_info *s = HYPERVISOR_shared_info; | ||
205 | unsigned int cpu = get_cpu(); | ||
206 | |||
207 | BUG_ON(!irqs_disabled()); | ||
208 | |||
209 | /* Slow path (hypercall) if this is a non-local port. */ | ||
210 | if (unlikely(cpu != cpu_from_evtchn(port))) { | ||
211 | struct evtchn_unmask unmask = { .port = port }; | ||
212 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); | ||
213 | } else { | ||
214 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | ||
215 | |||
216 | sync_clear_bit(port, &s->evtchn_mask[0]); | ||
217 | |||
218 | /* | ||
219 | * The following is basically the equivalent of | ||
220 | * 'hw_resend_irq'. Just like a real IO-APIC we 'lose | ||
221 | * the interrupt edge' if the channel is masked. | ||
222 | */ | ||
223 | if (sync_test_bit(port, &s->evtchn_pending[0]) && | ||
224 | !sync_test_and_set_bit(port / BITS_PER_LONG, | ||
225 | &vcpu_info->evtchn_pending_sel)) | ||
226 | vcpu_info->evtchn_upcall_pending = 1; | ||
227 | } | ||
228 | |||
229 | put_cpu(); | ||
230 | } | ||
231 | |||
232 | static int find_unbound_irq(void) | ||
233 | { | ||
234 | int irq; | ||
235 | |||
236 | /* Only allocate from dynirq range */ | ||
237 | for (irq = 0; irq < NR_IRQS; irq++) | ||
238 | if (irq_bindcount[irq] == 0) | ||
239 | break; | ||
240 | |||
241 | if (irq == NR_IRQS) | ||
242 | panic("No available IRQ to bind to: increase NR_IRQS!\n"); | ||
243 | |||
244 | return irq; | ||
245 | } | ||
246 | |||
247 | int bind_evtchn_to_irq(unsigned int evtchn) | ||
248 | { | ||
249 | int irq; | ||
250 | |||
251 | spin_lock(&irq_mapping_update_lock); | ||
252 | |||
253 | irq = evtchn_to_irq[evtchn]; | ||
254 | |||
255 | if (irq == -1) { | ||
256 | irq = find_unbound_irq(); | ||
257 | |||
258 | dynamic_irq_init(irq); | ||
259 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | ||
260 | handle_level_irq, "event"); | ||
261 | |||
262 | evtchn_to_irq[evtchn] = irq; | ||
263 | irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); | ||
264 | } | ||
265 | |||
266 | irq_bindcount[irq]++; | ||
267 | |||
268 | spin_unlock(&irq_mapping_update_lock); | ||
269 | |||
270 | return irq; | ||
271 | } | ||
272 | EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); | ||
273 | |||
274 | static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | ||
275 | { | ||
276 | struct evtchn_bind_ipi bind_ipi; | ||
277 | int evtchn, irq; | ||
278 | |||
279 | spin_lock(&irq_mapping_update_lock); | ||
280 | |||
281 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | ||
282 | if (irq == -1) { | ||
283 | irq = find_unbound_irq(); | ||
284 | if (irq < 0) | ||
285 | goto out; | ||
286 | |||
287 | dynamic_irq_init(irq); | ||
288 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | ||
289 | handle_level_irq, "ipi"); | ||
290 | |||
291 | bind_ipi.vcpu = cpu; | ||
292 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, | ||
293 | &bind_ipi) != 0) | ||
294 | BUG(); | ||
295 | evtchn = bind_ipi.port; | ||
296 | |||
297 | evtchn_to_irq[evtchn] = irq; | ||
298 | irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); | ||
299 | |||
300 | per_cpu(ipi_to_irq, cpu)[ipi] = irq; | ||
301 | |||
302 | bind_evtchn_to_cpu(evtchn, cpu); | ||
303 | } | ||
304 | |||
305 | irq_bindcount[irq]++; | ||
306 | |||
307 | out: | ||
308 | spin_unlock(&irq_mapping_update_lock); | ||
309 | return irq; | ||
310 | } | ||
311 | |||
312 | |||
313 | static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | ||
314 | { | ||
315 | struct evtchn_bind_virq bind_virq; | ||
316 | int evtchn, irq; | ||
317 | |||
318 | spin_lock(&irq_mapping_update_lock); | ||
319 | |||
320 | irq = per_cpu(virq_to_irq, cpu)[virq]; | ||
321 | |||
322 | if (irq == -1) { | ||
323 | bind_virq.virq = virq; | ||
324 | bind_virq.vcpu = cpu; | ||
325 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, | ||
326 | &bind_virq) != 0) | ||
327 | BUG(); | ||
328 | evtchn = bind_virq.port; | ||
329 | |||
330 | irq = find_unbound_irq(); | ||
331 | |||
332 | dynamic_irq_init(irq); | ||
333 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | ||
334 | handle_level_irq, "virq"); | ||
335 | |||
336 | evtchn_to_irq[evtchn] = irq; | ||
337 | irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); | ||
338 | |||
339 | per_cpu(virq_to_irq, cpu)[virq] = irq; | ||
340 | |||
341 | bind_evtchn_to_cpu(evtchn, cpu); | ||
342 | } | ||
343 | |||
344 | irq_bindcount[irq]++; | ||
345 | |||
346 | spin_unlock(&irq_mapping_update_lock); | ||
347 | |||
348 | return irq; | ||
349 | } | ||
350 | |||
351 | static void unbind_from_irq(unsigned int irq) | ||
352 | { | ||
353 | struct evtchn_close close; | ||
354 | int evtchn = evtchn_from_irq(irq); | ||
355 | |||
356 | spin_lock(&irq_mapping_update_lock); | ||
357 | |||
358 | if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) { | ||
359 | close.port = evtchn; | ||
360 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | ||
361 | BUG(); | ||
362 | |||
363 | switch (type_from_irq(irq)) { | ||
364 | case IRQT_VIRQ: | ||
365 | per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) | ||
366 | [index_from_irq(irq)] = -1; | ||
367 | break; | ||
368 | default: | ||
369 | break; | ||
370 | } | ||
371 | |||
372 | /* Closed ports are implicitly re-bound to VCPU0. */ | ||
373 | bind_evtchn_to_cpu(evtchn, 0); | ||
374 | |||
375 | evtchn_to_irq[evtchn] = -1; | ||
376 | irq_info[irq] = IRQ_UNBOUND; | ||
377 | |||
378 | dynamic_irq_init(irq); | ||
379 | } | ||
380 | |||
381 | spin_unlock(&irq_mapping_update_lock); | ||
382 | } | ||
383 | |||
384 | int bind_evtchn_to_irqhandler(unsigned int evtchn, | ||
385 | irq_handler_t handler, | ||
386 | unsigned long irqflags, | ||
387 | const char *devname, void *dev_id) | ||
388 | { | ||
389 | unsigned int irq; | ||
390 | int retval; | ||
391 | |||
392 | irq = bind_evtchn_to_irq(evtchn); | ||
393 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | ||
394 | if (retval != 0) { | ||
395 | unbind_from_irq(irq); | ||
396 | return retval; | ||
397 | } | ||
398 | |||
399 | return irq; | ||
400 | } | ||
401 | EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); | ||
402 | |||
403 | int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, | ||
404 | irq_handler_t handler, | ||
405 | unsigned long irqflags, const char *devname, void *dev_id) | ||
406 | { | ||
407 | unsigned int irq; | ||
408 | int retval; | ||
409 | |||
410 | irq = bind_virq_to_irq(virq, cpu); | ||
411 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | ||
412 | if (retval != 0) { | ||
413 | unbind_from_irq(irq); | ||
414 | return retval; | ||
415 | } | ||
416 | |||
417 | return irq; | ||
418 | } | ||
419 | EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); | ||
420 | |||
421 | int bind_ipi_to_irqhandler(enum ipi_vector ipi, | ||
422 | unsigned int cpu, | ||
423 | irq_handler_t handler, | ||
424 | unsigned long irqflags, | ||
425 | const char *devname, | ||
426 | void *dev_id) | ||
427 | { | ||
428 | int irq, retval; | ||
429 | |||
430 | irq = bind_ipi_to_irq(ipi, cpu); | ||
431 | if (irq < 0) | ||
432 | return irq; | ||
433 | |||
434 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | ||
435 | if (retval != 0) { | ||
436 | unbind_from_irq(irq); | ||
437 | return retval; | ||
438 | } | ||
439 | |||
440 | return irq; | ||
441 | } | ||
442 | |||
443 | void unbind_from_irqhandler(unsigned int irq, void *dev_id) | ||
444 | { | ||
445 | free_irq(irq, dev_id); | ||
446 | unbind_from_irq(irq); | ||
447 | } | ||
448 | EXPORT_SYMBOL_GPL(unbind_from_irqhandler); | ||
449 | |||
450 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) | ||
451 | { | ||
452 | int irq = per_cpu(ipi_to_irq, cpu)[vector]; | ||
453 | BUG_ON(irq < 0); | ||
454 | notify_remote_via_irq(irq); | ||
455 | } | ||
456 | |||
457 | irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | ||
458 | { | ||
459 | struct shared_info *sh = HYPERVISOR_shared_info; | ||
460 | int cpu = smp_processor_id(); | ||
461 | int i; | ||
462 | unsigned long flags; | ||
463 | static DEFINE_SPINLOCK(debug_lock); | ||
464 | |||
465 | spin_lock_irqsave(&debug_lock, flags); | ||
466 | |||
467 | printk("vcpu %d\n ", cpu); | ||
468 | |||
469 | for_each_online_cpu(i) { | ||
470 | struct vcpu_info *v = per_cpu(xen_vcpu, i); | ||
471 | printk("%d: masked=%d pending=%d event_sel %08lx\n ", i, | ||
472 | (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask, | ||
473 | v->evtchn_upcall_pending, | ||
474 | v->evtchn_pending_sel); | ||
475 | } | ||
476 | printk("pending:\n "); | ||
477 | for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) | ||
478 | printk("%08lx%s", sh->evtchn_pending[i], | ||
479 | i % 8 == 0 ? "\n " : " "); | ||
480 | printk("\nmasks:\n "); | ||
481 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) | ||
482 | printk("%08lx%s", sh->evtchn_mask[i], | ||
483 | i % 8 == 0 ? "\n " : " "); | ||
484 | |||
485 | printk("\nunmasked:\n "); | ||
486 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) | ||
487 | printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i], | ||
488 | i % 8 == 0 ? "\n " : " "); | ||
489 | |||
490 | printk("\npending list:\n"); | ||
491 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | ||
492 | if (sync_test_bit(i, sh->evtchn_pending)) { | ||
493 | printk(" %d: event %d -> irq %d\n", | ||
494 | cpu_evtchn[i], i, | ||
495 | evtchn_to_irq[i]); | ||
496 | } | ||
497 | } | ||
498 | |||
499 | spin_unlock_irqrestore(&debug_lock, flags); | ||
500 | |||
501 | return IRQ_HANDLED; | ||
502 | } | ||
503 | |||
504 | |||
505 | /* | ||
506 | * Search the CPU's pending events bitmasks. For each one found, map | ||
507 | * the event number to an irq, and feed it into do_IRQ() for | ||
508 | * handling. | ||
509 | * | ||
510 | * Xen uses a two-level bitmap to speed searching. The first level is | ||
511 | * a bitset of words which contain pending event bits. The second | ||
512 | * level is a bitset of pending events themselves. | ||
513 | */ | ||
514 | void xen_evtchn_do_upcall(struct pt_regs *regs) | ||
515 | { | ||
516 | int cpu = get_cpu(); | ||
517 | struct shared_info *s = HYPERVISOR_shared_info; | ||
518 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | ||
519 | static DEFINE_PER_CPU(unsigned, nesting_count); | ||
520 | unsigned count; | ||
521 | |||
522 | do { | ||
523 | unsigned long pending_words; | ||
524 | |||
525 | vcpu_info->evtchn_upcall_pending = 0; | ||
526 | |||
527 | if (__get_cpu_var(nesting_count)++) | ||
528 | goto out; | ||
529 | |||
530 | #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ | ||
531 | /* Clear master flag /before/ clearing selector flag. */ | ||
532 | rmb(); | ||
533 | #endif | ||
534 | pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); | ||
535 | while (pending_words != 0) { | ||
536 | unsigned long pending_bits; | ||
537 | int word_idx = __ffs(pending_words); | ||
538 | pending_words &= ~(1UL << word_idx); | ||
539 | |||
540 | while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) { | ||
541 | int bit_idx = __ffs(pending_bits); | ||
542 | int port = (word_idx * BITS_PER_LONG) + bit_idx; | ||
543 | int irq = evtchn_to_irq[port]; | ||
544 | |||
545 | if (irq != -1) | ||
546 | xen_do_IRQ(irq, regs); | ||
547 | } | ||
548 | } | ||
549 | |||
550 | BUG_ON(!irqs_disabled()); | ||
551 | |||
552 | count = __get_cpu_var(nesting_count); | ||
553 | __get_cpu_var(nesting_count) = 0; | ||
554 | } while (count != 1); | ||
555 | |||
556 | out: | ||
557 | put_cpu(); | ||
558 | } | ||
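
A concrete walk of the two-level scan (hypothetical 64-bit values):

    /* Suppose xchg() returns pending_words == 0x8 (only bit 3 set) and
     * active_evtchns(cpu, s, 3) has bit 5 set:
     *   word_idx = __ffs(0x8)  == 3
     *   bit_idx  = __ffs(bits) == 5
     *   port     = 3 * BITS_PER_LONG + 5 == 197  (BITS_PER_LONG == 64)
     * and evtchn_to_irq[197] supplies the irq handed to xen_do_IRQ(). */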
559 | |||
560 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ | ||
561 | static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | ||
562 | { | ||
563 | struct evtchn_bind_vcpu bind_vcpu; | ||
564 | int evtchn = evtchn_from_irq(irq); | ||
565 | |||
566 | if (!VALID_EVTCHN(evtchn)) | ||
567 | return; | ||
568 | |||
569 | /* Send future instances of this interrupt to the target vcpu. */ | ||
570 | bind_vcpu.port = evtchn; | ||
571 | bind_vcpu.vcpu = tcpu; | ||
572 | |||
573 | /* | ||
574 | * If this fails, it usually just indicates that we're dealing with a | ||
575 | * virq or IPI channel, which don't actually need to be rebound. Ignore | ||
576 | * it, but don't do the xenlinux-level rebind in that case. | ||
577 | */ | ||
578 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | ||
579 | bind_evtchn_to_cpu(evtchn, tcpu); | ||
580 | } | ||
581 | |||
582 | |||
583 | static void set_affinity_irq(unsigned irq, cpumask_t dest) | ||
584 | { | ||
585 | unsigned tcpu = first_cpu(dest); | ||
586 | rebind_irq_to_cpu(irq, tcpu); | ||
587 | } | ||
588 | |||
589 | int resend_irq_on_evtchn(unsigned int irq) | ||
590 | { | ||
591 | int masked, evtchn = evtchn_from_irq(irq); | ||
592 | struct shared_info *s = HYPERVISOR_shared_info; | ||
593 | |||
594 | if (!VALID_EVTCHN(evtchn)) | ||
595 | return 1; | ||
596 | |||
597 | masked = sync_test_and_set_bit(evtchn, s->evtchn_mask); | ||
598 | sync_set_bit(evtchn, s->evtchn_pending); | ||
599 | if (!masked) | ||
600 | unmask_evtchn(evtchn); | ||
601 | |||
602 | return 1; | ||
603 | } | ||
604 | |||
605 | static void enable_dynirq(unsigned int irq) | ||
606 | { | ||
607 | int evtchn = evtchn_from_irq(irq); | ||
608 | |||
609 | if (VALID_EVTCHN(evtchn)) | ||
610 | unmask_evtchn(evtchn); | ||
611 | } | ||
612 | |||
613 | static void disable_dynirq(unsigned int irq) | ||
614 | { | ||
615 | int evtchn = evtchn_from_irq(irq); | ||
616 | |||
617 | if (VALID_EVTCHN(evtchn)) | ||
618 | mask_evtchn(evtchn); | ||
619 | } | ||
620 | |||
621 | static void ack_dynirq(unsigned int irq) | ||
622 | { | ||
623 | int evtchn = evtchn_from_irq(irq); | ||
624 | |||
625 | move_native_irq(irq); | ||
626 | |||
627 | if (VALID_EVTCHN(evtchn)) | ||
628 | clear_evtchn(evtchn); | ||
629 | } | ||
630 | |||
631 | static int retrigger_dynirq(unsigned int irq) | ||
632 | { | ||
633 | int evtchn = evtchn_from_irq(irq); | ||
634 | struct shared_info *sh = HYPERVISOR_shared_info; | ||
635 | int ret = 0; | ||
636 | |||
637 | if (VALID_EVTCHN(evtchn)) { | ||
638 | int masked; | ||
639 | |||
640 | masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask); | ||
641 | sync_set_bit(evtchn, sh->evtchn_pending); | ||
642 | if (!masked) | ||
643 | unmask_evtchn(evtchn); | ||
644 | ret = 1; | ||
645 | } | ||
646 | |||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | static struct irq_chip xen_dynamic_chip __read_mostly = { | ||
651 | .name = "xen-dyn", | ||
652 | .mask = disable_dynirq, | ||
653 | .unmask = enable_dynirq, | ||
654 | .ack = ack_dynirq, | ||
655 | .set_affinity = set_affinity_irq, | ||
656 | .retrigger = retrigger_dynirq, | ||
657 | }; | ||
658 | |||
659 | void __init xen_init_IRQ(void) | ||
660 | { | ||
661 | int i; | ||
662 | |||
663 | init_evtchn_cpu_bindings(); | ||
664 | |||
665 | /* No event channels are 'live' right now. */ | ||
666 | for (i = 0; i < NR_EVENT_CHANNELS; i++) | ||
667 | mask_evtchn(i); | ||
668 | |||
669 | /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ | ||
670 | for (i = 0; i < NR_IRQS; i++) | ||
671 | irq_bindcount[i] = 0; | ||
672 | |||
673 | irq_ctx_init(smp_processor_id()); | ||
674 | } | ||
diff --git a/drivers/xen/features.c b/drivers/xen/features.c new file mode 100644 index 000000000000..0707714e40d6 --- /dev/null +++ b/drivers/xen/features.c | |||
@@ -0,0 +1,29 @@ | |||
1 | /****************************************************************************** | ||
2 | * features.c | ||
3 | * | ||
4 | * Xen feature flags. | ||
5 | * | ||
6 | * Copyright (c) 2006, Ian Campbell, XenSource Inc. | ||
7 | */ | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/cache.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <asm/xen/hypervisor.h> | ||
12 | #include <xen/features.h> | ||
13 | |||
14 | u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; | ||
15 | EXPORT_SYMBOL_GPL(xen_features); | ||
16 | |||
17 | void xen_setup_features(void) | ||
18 | { | ||
19 | struct xen_feature_info fi; | ||
20 | int i, j; | ||
21 | |||
22 | for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { | ||
23 | fi.submap_idx = i; | ||
24 | if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) | ||
25 | break; | ||
26 | for (j = 0; j < 32; j++) | ||
27 | xen_features[i * 32 + j] = !!(fi.submap & 1<<j); | ||
28 | } | ||
29 | } | ||
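
Callers test individual flags through the xen_feature() accessor declared in <xen/features.h>; for example, the balloon driver earlier in this diff branches on auto-translation support:

    if (xen_feature(XENFEAT_auto_translated_physmap)) {
            /* the hypervisor translates pseudo-physical frames itself,
             * so a plain XENMEM_decrease_reservation on the gmfn suffices */
    }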
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index d85dc6d41c2a..52b6b41b909d 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -439,24 +439,6 @@ static inline unsigned int max_nr_grant_frames(void) | |||
439 | return xen_max; | 439 | return xen_max; |
440 | } | 440 | } |
441 | 441 | ||
442 | static int map_pte_fn(pte_t *pte, struct page *pmd_page, | ||
443 | unsigned long addr, void *data) | ||
444 | { | ||
445 | unsigned long **frames = (unsigned long **)data; | ||
446 | |||
447 | set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); | ||
448 | (*frames)++; | ||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, | ||
453 | unsigned long addr, void *data) | ||
454 | { | ||
455 | |||
456 | set_pte_at(&init_mm, addr, pte, __pte(0)); | ||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static int gnttab_map(unsigned int start_idx, unsigned int end_idx) | 442 | static int gnttab_map(unsigned int start_idx, unsigned int end_idx) |
461 | { | 443 | { |
462 | struct gnttab_setup_table setup; | 444 | struct gnttab_setup_table setup; |
@@ -470,7 +452,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) | |||
470 | 452 | ||
471 | setup.dom = DOMID_SELF; | 453 | setup.dom = DOMID_SELF; |
472 | setup.nr_frames = nr_gframes; | 454 | setup.nr_frames = nr_gframes; |
473 | setup.frame_list = frames; | 455 | set_xen_guest_handle(setup.frame_list, frames); |
474 | 456 | ||
475 | rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); | 457 | rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); |
476 | if (rc == -ENOSYS) { | 458 | if (rc == -ENOSYS) { |
@@ -480,17 +462,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) | |||
480 | 462 | ||
481 | BUG_ON(rc || setup.status); | 463 | BUG_ON(rc || setup.status); |
482 | 464 | ||
483 | if (shared == NULL) { | 465 | rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(), |
484 | struct vm_struct *area; | 466 | &shared); |
485 | area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); | ||
486 | BUG_ON(area == NULL); | ||
487 | shared = area->addr; | ||
488 | } | ||
489 | rc = apply_to_page_range(&init_mm, (unsigned long)shared, | ||
490 | PAGE_SIZE * nr_gframes, | ||
491 | map_pte_fn, &frames); | ||
492 | BUG_ON(rc); | 467 | BUG_ON(rc); |
493 | frames -= nr_gframes; /* adjust after map_pte_fn() */ | ||
494 | 468 | ||
495 | kfree(frames); | 469 | kfree(frames); |
496 | 470 | ||
@@ -506,10 +480,7 @@ static int gnttab_resume(void) | |||
506 | 480 | ||
507 | static int gnttab_suspend(void) | 481 | static int gnttab_suspend(void) |
508 | { | 482 | { |
509 | apply_to_page_range(&init_mm, (unsigned long)shared, | 483 | arch_gnttab_unmap_shared(shared, nr_grant_frames); |
510 | PAGE_SIZE * nr_grant_frames, | ||
511 | unmap_pte_fn, NULL); | ||
512 | |||
513 | return 0; | 484 | return 0; |
514 | } | 485 | } |
515 | 486 | ||
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 9fd2f70ab46d..0f86b0ff7879 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c | |||
@@ -399,7 +399,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) | |||
399 | 399 | ||
400 | *vaddr = NULL; | 400 | *vaddr = NULL; |
401 | 401 | ||
402 | area = alloc_vm_area(PAGE_SIZE); | 402 | area = xen_alloc_vm_area(PAGE_SIZE); |
403 | if (!area) | 403 | if (!area) |
404 | return -ENOMEM; | 404 | return -ENOMEM; |
405 | 405 | ||
@@ -409,7 +409,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) | |||
409 | BUG(); | 409 | BUG(); |
410 | 410 | ||
411 | if (op.status != GNTST_okay) { | 411 | if (op.status != GNTST_okay) { |
412 | free_vm_area(area); | 412 | xen_free_vm_area(area); |
413 | xenbus_dev_fatal(dev, op.status, | 413 | xenbus_dev_fatal(dev, op.status, |
414 | "mapping in shared page %d from domain %d", | 414 | "mapping in shared page %d from domain %d", |
415 | gnt_ref, dev->otherend_id); | 415 | gnt_ref, dev->otherend_id); |
@@ -508,7 +508,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) | |||
508 | BUG(); | 508 | BUG(); |
509 | 509 | ||
510 | if (op.status == GNTST_okay) | 510 | if (op.status == GNTST_okay) |
511 | free_vm_area(area); | 511 | xen_free_vm_area(area); |
512 | else | 512 | else |
513 | xenbus_dev_error(dev, op.status, | 513 | xenbus_dev_error(dev, op.status, |
514 | "unmapping page at handle %d error %d", | 514 | "unmapping page at handle %d error %d", |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 4750de316ad3..57ceb5346b74 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -88,6 +88,16 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv) | |||
88 | return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; | 88 | return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; |
89 | } | 89 | } |
90 | 90 | ||
91 | static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env) | ||
92 | { | ||
93 | struct xenbus_device *dev = to_xenbus_device(_dev); | ||
94 | |||
95 | if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) | ||
96 | return -ENOMEM; | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
91 | /* device/<type>/<id> => <type>-<id> */ | 101 | /* device/<type>/<id> => <type>-<id> */ |
92 | static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) | 102 | static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) |
93 | { | 103 | { |
@@ -166,6 +176,7 @@ static struct xen_bus_type xenbus_frontend = { | |||
166 | .bus = { | 176 | .bus = { |
167 | .name = "xen", | 177 | .name = "xen", |
168 | .match = xenbus_match, | 178 | .match = xenbus_match, |
179 | .uevent = xenbus_uevent, | ||
169 | .probe = xenbus_dev_probe, | 180 | .probe = xenbus_dev_probe, |
170 | .remove = xenbus_dev_remove, | 181 | .remove = xenbus_dev_remove, |
171 | .shutdown = xenbus_dev_shutdown, | 182 | .shutdown = xenbus_dev_shutdown, |
@@ -438,6 +449,12 @@ static ssize_t xendev_show_devtype(struct device *dev, | |||
438 | } | 449 | } |
439 | DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); | 450 | DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); |
440 | 451 | ||
452 | static ssize_t xendev_show_modalias(struct device *dev, | ||
453 | struct device_attribute *attr, char *buf) | ||
454 | { | ||
455 | return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); | ||
456 | } | ||
457 | DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); | ||
441 | 458 | ||
442 | int xenbus_probe_node(struct xen_bus_type *bus, | 459 | int xenbus_probe_node(struct xen_bus_type *bus, |
443 | const char *type, | 460 | const char *type, |
@@ -492,10 +509,16 @@ int xenbus_probe_node(struct xen_bus_type *bus, | |||
492 | 509 | ||
493 | err = device_create_file(&xendev->dev, &dev_attr_devtype); | 510 | err = device_create_file(&xendev->dev, &dev_attr_devtype); |
494 | if (err) | 511 | if (err) |
495 | goto fail_remove_file; | 512 | goto fail_remove_nodename; |
513 | |||
514 | err = device_create_file(&xendev->dev, &dev_attr_modalias); | ||
515 | if (err) | ||
516 | goto fail_remove_devtype; | ||
496 | 517 | ||
497 | return 0; | 518 | return 0; |
498 | fail_remove_file: | 519 | fail_remove_devtype: |
520 | device_remove_file(&xendev->dev, &dev_attr_devtype); | ||
521 | fail_remove_nodename: | ||
499 | device_remove_file(&xendev->dev, &dev_attr_nodename); | 522 | device_remove_file(&xendev->dev, &dev_attr_nodename); |
500 | fail_unregister: | 523 | fail_unregister: |
501 | device_unregister(&xendev->dev); | 524 | device_unregister(&xendev->dev); |
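
The net effect of the uevent/modalias additions above: a device of type vbd now reports MODALIAS=xen:vbd both in its uevent and via sysfs, so udev can autoload the matching frontend, provided the driver declares the alias (illustrative; each frontend defines its own):

    MODULE_ALIAS("xen:vbd");        /* e.g. in a block frontend */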
@@ -846,6 +869,7 @@ static int is_disconnected_device(struct device *dev, void *data) | |||
846 | { | 869 | { |
847 | struct xenbus_device *xendev = to_xenbus_device(dev); | 870 | struct xenbus_device *xendev = to_xenbus_device(dev); |
848 | struct device_driver *drv = data; | 871 | struct device_driver *drv = data; |
872 | struct xenbus_driver *xendrv; | ||
849 | 873 | ||
850 | /* | 874 | /* |
851 | * A device with no driver will never connect. We care only about | 875 | * A device with no driver will never connect. We care only about |
@@ -858,7 +882,9 @@ static int is_disconnected_device(struct device *dev, void *data) | |||
858 | if (drv && (dev->driver != drv)) | 882 | if (drv && (dev->driver != drv)) |
859 | return 0; | 883 | return 0; |
860 | 884 | ||
861 | return (xendev->state != XenbusStateConnected); | 885 | xendrv = to_xenbus_driver(dev->driver); |
886 | return (xendev->state != XenbusStateConnected || | ||
887 | (xendrv->is_ready && !xendrv->is_ready(xendev))); | ||
862 | } | 888 | } |
863 | 889 | ||
864 | static int exists_disconnected_device(struct device_driver *drv) | 890 | static int exists_disconnected_device(struct device_driver *drv) |
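
The is_ready hook tested above lets a frontend report "connected but not yet usable", so waiting callers block until the device is genuinely ready. A driver supplies it in its xenbus_driver, along these lines (hypothetical names, mirroring the is_ready flag pattern used by the block frontend):

    static int demo_is_ready(struct xenbus_device *dev)
    {
            struct demo_info *info = dev->dev.driver_data;

            return info->is_ready;  /* set once the rings are connected */
    }

    static struct xenbus_driver demo_driver = {
            /* .ids, .probe, .otherend_changed, ... */
            .is_ready = demo_is_ready,
    };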
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c new file mode 100644 index 000000000000..797cb4e31f07 --- /dev/null +++ b/drivers/xen/xencomm.c | |||
@@ -0,0 +1,232 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) IBM Corp. 2006 | ||
17 | * | ||
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/gfp.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <xen/xencomm.h> | ||
25 | #include <xen/interface/xen.h> | ||
26 | #ifdef __ia64__ | ||
27 | #include <asm/xen/xencomm.h> /* for is_kern_addr() */ | ||
28 | #endif | ||
29 | |||
30 | #ifdef HAVE_XEN_PLATFORM_COMPAT_H | ||
31 | #include <xen/platform-compat.h> | ||
32 | #endif | ||
33 | |||
34 | static int xencomm_init(struct xencomm_desc *desc, | ||
35 | void *buffer, unsigned long bytes) | ||
36 | { | ||
37 | unsigned long recorded = 0; | ||
38 | int i = 0; | ||
39 | |||
40 | while ((recorded < bytes) && (i < desc->nr_addrs)) { | ||
41 | unsigned long vaddr = (unsigned long)buffer + recorded; | ||
42 | unsigned long paddr; | ||
43 | int offset; | ||
44 | int chunksz; | ||
45 | |||
46 | offset = vaddr % PAGE_SIZE; /* handle partial pages */ | ||
47 | chunksz = min(PAGE_SIZE - offset, bytes - recorded); | ||
48 | |||
49 | paddr = xencomm_vtop(vaddr); | ||
50 | if (paddr == ~0UL) { | ||
51 | printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n", | ||
52 | __func__, vaddr); | ||
53 | return -EINVAL; | ||
54 | } | ||
55 | |||
56 | desc->address[i++] = paddr; | ||
57 | recorded += chunksz; | ||
58 | } | ||
59 | |||
60 | if (recorded < bytes) { | ||
61 | printk(KERN_DEBUG | ||
62 | "%s: could only translate %ld of %ld bytes\n", | ||
63 | __func__, recorded, bytes); | ||
64 | return -ENOSPC; | ||
65 | } | ||
66 | |||
67 | /* mark remaining addresses invalid (just for safety) */ | ||
68 | while (i < desc->nr_addrs) | ||
69 | desc->address[i++] = XENCOMM_INVALID; | ||
70 | |||
71 | desc->magic = XENCOMM_MAGIC; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, | ||
77 | void *buffer, unsigned long bytes) | ||
78 | { | ||
79 | struct xencomm_desc *desc; | ||
80 | unsigned long buffer_ulong = (unsigned long)buffer; | ||
81 | unsigned long start = buffer_ulong & PAGE_MASK; | ||
82 | unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK; | ||
83 | unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT; | ||
84 | unsigned long size = sizeof(*desc) + | ||
85 | sizeof(desc->address[0]) * nr_addrs; | ||
86 | |||
87 | /* | ||
88 | * The slab allocator returns a pointer aligned to at least sizeof(void *). | ||
89 | * When sizeof(*desc) > sizeof(void *), struct xencomm_desc might | ||
90 | * cross a page boundary. | ||
91 | */ | ||
92 | if (sizeof(*desc) > sizeof(void *)) { | ||
93 | unsigned long order = get_order(size); | ||
94 | desc = (struct xencomm_desc *)__get_free_pages(gfp_mask, | ||
95 | order); | ||
96 | if (desc == NULL) | ||
97 | return NULL; | ||
98 | |||
99 | desc->nr_addrs = | ||
100 | ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) / | ||
101 | sizeof(*desc->address); | ||
102 | } else { | ||
103 | desc = kmalloc(size, gfp_mask); | ||
104 | if (desc == NULL) | ||
105 | return NULL; | ||
106 | |||
107 | desc->nr_addrs = nr_addrs; | ||
108 | } | ||
109 | return desc; | ||
110 | } | ||
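
A worked instance of the sizing arithmetic above (hypothetical addresses, 4 KiB pages):

    /* buffer == 0x1800, bytes == 0x1000:
     *   start    == 0x1000           (buffer & PAGE_MASK)
     *   end      == 0x2fff           ((buffer + bytes) | ~PAGE_MASK)
     *   nr_addrs == (end - start + 1) >> PAGE_SHIFT == 0x2000 >> 12 == 2
     * i.e. the chunk straddles one page boundary, so two paddrs are
     * recorded by xencomm_init(). */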
111 | |||
112 | void xencomm_free(struct xencomm_handle *desc) | ||
113 | { | ||
114 | if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) { | ||
115 | struct xencomm_desc *desc__ = (struct xencomm_desc *)desc; | ||
116 | if (sizeof(*desc__) > sizeof(void *)) { | ||
117 | unsigned long size = sizeof(*desc__) + | ||
118 | sizeof(desc__->address[0]) * desc__->nr_addrs; | ||
119 | unsigned long order = get_order(size); | ||
120 | free_pages((unsigned long)__va(desc), order); | ||
121 | } else | ||
122 | kfree(__va(desc)); | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static int xencomm_create(void *buffer, unsigned long bytes, | ||
127 | struct xencomm_desc **ret, gfp_t gfp_mask) | ||
128 | { | ||
129 | struct xencomm_desc *desc; | ||
130 | int rc; | ||
131 | |||
132 | pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes); | ||
133 | |||
134 | if (bytes == 0) { | ||
135 | /* don't create a descriptor; Xen recognizes NULL. */ | ||
136 | BUG_ON(buffer != NULL); | ||
137 | *ret = NULL; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | BUG_ON(buffer == NULL); /* 'bytes' is non-zero */ | ||
142 | |||
143 | desc = xencomm_alloc(gfp_mask, buffer, bytes); | ||
144 | if (!desc) { | ||
145 | printk(KERN_DEBUG "%s failure\n", "xencomm_alloc"); | ||
146 | return -ENOMEM; | ||
147 | } | ||
148 | |||
149 | rc = xencomm_init(desc, buffer, bytes); | ||
150 | if (rc) { | ||
151 | printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc); | ||
152 | xencomm_free((struct xencomm_handle *)__pa(desc)); | ||
153 | return rc; | ||
154 | } | ||
155 | |||
156 | *ret = desc; | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | /* true iff the address is physically contiguous, i.e. in the kernel direct mapping rather than the VMALLOC region */ | ||
161 | static int is_phys_contiguous(unsigned long addr) | ||
162 | { | ||
163 | if (!is_kernel_addr(addr)) | ||
164 | return 0; | ||
165 | |||
166 | return (addr < VMALLOC_START) || (addr >= VMALLOC_END); | ||
167 | } | ||
168 | |||
169 | static struct xencomm_handle *xencomm_create_inline(void *ptr) | ||
170 | { | ||
171 | unsigned long paddr; | ||
172 | |||
173 | BUG_ON(!is_phys_contiguous((unsigned long)ptr)); | ||
174 | |||
175 | paddr = (unsigned long)xencomm_pa(ptr); | ||
176 | BUG_ON(paddr & XENCOMM_INLINE_FLAG); | ||
177 | return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG); | ||
178 | } | ||
179 | |||
180 | /* "mini" routine, for stack-based communications: */ | ||
181 | static int xencomm_create_mini(void *buffer, | ||
182 | unsigned long bytes, struct xencomm_mini *xc_desc, | ||
183 | struct xencomm_desc **ret) | ||
184 | { | ||
185 | int rc = 0; | ||
186 | struct xencomm_desc *desc; | ||
187 | BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0); | ||
188 | |||
189 | desc = (void *)xc_desc; | ||
190 | |||
191 | desc->nr_addrs = XENCOMM_MINI_ADDRS; | ||
192 | |||
193 | rc = xencomm_init(desc, buffer, bytes); | ||
194 | if (!rc) | ||
195 | *ret = desc; | ||
196 | |||
197 | return rc; | ||
198 | } | ||
199 | |||
200 | struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) | ||
201 | { | ||
202 | int rc; | ||
203 | struct xencomm_desc *desc; | ||
204 | |||
205 | if (is_phys_contiguous((unsigned long)ptr)) | ||
206 | return xencomm_create_inline(ptr); | ||
207 | |||
208 | rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); | ||
209 | |||
210 | if (rc || desc == NULL) | ||
211 | return NULL; | ||
212 | |||
213 | return xencomm_pa(desc); | ||
214 | } | ||
215 | |||
216 | struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, | ||
217 | struct xencomm_mini *xc_desc) | ||
218 | { | ||
219 | int rc; | ||
220 | struct xencomm_desc *desc = NULL; | ||
221 | |||
222 | if (is_phys_contiguous((unsigned long)ptr)) | ||
223 | return xencomm_create_inline(ptr); | ||
224 | |||
225 | rc = xencomm_create_mini(ptr, bytes, xc_desc, | ||
226 | &desc); | ||
227 | |||
228 | if (rc) | ||
229 | return NULL; | ||
230 | |||
231 | return xencomm_pa(desc); | ||
232 | } | ||
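
Finally, a caller-side sketch of the no-alloc path (hypothetical wrapper; note that xencomm_create_mini() BUG_ONs unless the xencomm_mini area is aligned to its own size):

    long demo_wrap(void *buf, unsigned long len)
    {
            struct xencomm_mini xc_area
                    __attribute__((__aligned__(sizeof(struct xencomm_mini))));
            struct xencomm_handle *desc;

            desc = __xencomm_map_no_alloc(buf, len, &xc_area);
            if (desc == NULL)
                    return -EINVAL;

            /* pass 'desc' to the hypervisor in place of the raw pointer */
            return 0;
    }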