Diffstat (limited to 'drivers/xen'): 26 files changed, 1119 insertions, 277 deletions
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8795480c2350..a1ced521cf74 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -86,6 +86,7 @@ config XEN_BACKEND
86 | 86 | ||
87 | config XENFS | 87 | config XENFS |
88 | tristate "Xen filesystem" | 88 | tristate "Xen filesystem" |
89 | select XEN_PRIVCMD | ||
89 | default y | 90 | default y |
90 | help | 91 | help |
91 | The xen filesystem provides a way for domains to share | 92 | The xen filesystem provides a way for domains to share |
@@ -171,4 +172,10 @@ config XEN_PCIDEV_BACKEND
171 | xen-pciback.hide=(03:00.0)(04:00.0) | 172 | xen-pciback.hide=(03:00.0)(04:00.0) |
172 | 173 | ||
173 | If in doubt, say m. | 174 | If in doubt, say m. |
175 | |||
176 | config XEN_PRIVCMD | ||
177 | tristate | ||
178 | depends on XEN | ||
179 | default m | ||
180 | |||
174 | endmenu | 181 | endmenu |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 974fffdf22b2..aa31337192cc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_XEN_TMEM) += tmem.o
19 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | 19 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o |
20 | obj-$(CONFIG_XEN_DOM0) += pci.o | 20 | obj-$(CONFIG_XEN_DOM0) += pci.o |
21 | obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ | 21 | obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ |
22 | obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o | ||
22 | 23 | ||
23 | xen-evtchn-y := evtchn.o | 24 | xen-evtchn-y := evtchn.o |
24 | xen-gntdev-y := gntdev.o | 25 | xen-gntdev-y := gntdev.o |
25 | xen-gntalloc-y := gntalloc.o | 26 | xen-gntalloc-y := gntalloc.o |
27 | xen-privcmd-y := privcmd.o | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6e075cdd0c6b..e5e5812a1014 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -87,6 +87,7 @@ enum xen_irq_type {
87 | */ | 87 | */ |
88 | struct irq_info { | 88 | struct irq_info { |
89 | struct list_head list; | 89 | struct list_head list; |
90 | int refcnt; | ||
90 | enum xen_irq_type type; /* type */ | 91 | enum xen_irq_type type; /* type */ |
91 | unsigned irq; | 92 | unsigned irq; |
92 | unsigned short evtchn; /* event channel */ | 93 | unsigned short evtchn; /* event channel */ |
@@ -406,6 +407,7 @@ static void xen_irq_init(unsigned irq)
406 | panic("Unable to allocate metadata for IRQ%d\n", irq); | 407 | panic("Unable to allocate metadata for IRQ%d\n", irq); |
407 | 408 | ||
408 | info->type = IRQT_UNBOUND; | 409 | info->type = IRQT_UNBOUND; |
410 | info->refcnt = -1; | ||
409 | 411 | ||
410 | irq_set_handler_data(irq, info); | 412 | irq_set_handler_data(irq, info); |
411 | 413 | ||
@@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
469 | 471 | ||
470 | irq_set_handler_data(irq, NULL); | 472 | irq_set_handler_data(irq, NULL); |
471 | 473 | ||
474 | WARN_ON(info->refcnt > 0); | ||
475 | |||
472 | kfree(info); | 476 | kfree(info); |
473 | 477 | ||
474 | /* Legacy IRQ descriptors are managed by the arch. */ | 478 | /* Legacy IRQ descriptors are managed by the arch. */ |
@@ -637,7 +641,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
637 | if (irq != -1) { | 641 | if (irq != -1) { |
638 | printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", | 642 | printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", |
639 | irq, gsi); | 643 | irq, gsi); |
640 | goto out; /* XXX need refcount? */ | 644 | goto out; |
641 | } | 645 | } |
642 | 646 | ||
643 | irq = xen_allocate_irq_gsi(gsi); | 647 | irq = xen_allocate_irq_gsi(gsi); |
@@ -939,9 +943,16 @@ static void unbind_from_irq(unsigned int irq)
939 | { | 943 | { |
940 | struct evtchn_close close; | 944 | struct evtchn_close close; |
941 | int evtchn = evtchn_from_irq(irq); | 945 | int evtchn = evtchn_from_irq(irq); |
946 | struct irq_info *info = irq_get_handler_data(irq); | ||
942 | 947 | ||
943 | mutex_lock(&irq_mapping_update_lock); | 948 | mutex_lock(&irq_mapping_update_lock); |
944 | 949 | ||
950 | if (info->refcnt > 0) { | ||
951 | info->refcnt--; | ||
952 | if (info->refcnt != 0) | ||
953 | goto done; | ||
954 | } | ||
955 | |||
945 | if (VALID_EVTCHN(evtchn)) { | 956 | if (VALID_EVTCHN(evtchn)) { |
946 | close.port = evtchn; | 957 | close.port = evtchn; |
947 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | 958 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) |
@@ -970,6 +981,7 @@ static void unbind_from_irq(unsigned int irq)
970 | 981 | ||
971 | xen_free_irq(irq); | 982 | xen_free_irq(irq); |
972 | 983 | ||
984 | done: | ||
973 | mutex_unlock(&irq_mapping_update_lock); | 985 | mutex_unlock(&irq_mapping_update_lock); |
974 | } | 986 | } |
975 | 987 | ||
@@ -1065,6 +1077,69 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1065 | } | 1077 | } |
1066 | EXPORT_SYMBOL_GPL(unbind_from_irqhandler); | 1078 | EXPORT_SYMBOL_GPL(unbind_from_irqhandler); |
1067 | 1079 | ||
1080 | int evtchn_make_refcounted(unsigned int evtchn) | ||
1081 | { | ||
1082 | int irq = evtchn_to_irq[evtchn]; | ||
1083 | struct irq_info *info; | ||
1084 | |||
1085 | if (irq == -1) | ||
1086 | return -ENOENT; | ||
1087 | |||
1088 | info = irq_get_handler_data(irq); | ||
1089 | |||
1090 | if (!info) | ||
1091 | return -ENOENT; | ||
1092 | |||
1093 | WARN_ON(info->refcnt != -1); | ||
1094 | |||
1095 | info->refcnt = 1; | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
1099 | EXPORT_SYMBOL_GPL(evtchn_make_refcounted); | ||
1100 | |||
1101 | int evtchn_get(unsigned int evtchn) | ||
1102 | { | ||
1103 | int irq; | ||
1104 | struct irq_info *info; | ||
1105 | int err = -ENOENT; | ||
1106 | |||
1107 | if (evtchn >= NR_EVENT_CHANNELS) | ||
1108 | return -EINVAL; | ||
1109 | |||
1110 | mutex_lock(&irq_mapping_update_lock); | ||
1111 | |||
1112 | irq = evtchn_to_irq[evtchn]; | ||
1113 | if (irq == -1) | ||
1114 | goto done; | ||
1115 | |||
1116 | info = irq_get_handler_data(irq); | ||
1117 | |||
1118 | if (!info) | ||
1119 | goto done; | ||
1120 | |||
1121 | err = -EINVAL; | ||
1122 | if (info->refcnt <= 0) | ||
1123 | goto done; | ||
1124 | |||
1125 | info->refcnt++; | ||
1126 | err = 0; | ||
1127 | done: | ||
1128 | mutex_unlock(&irq_mapping_update_lock); | ||
1129 | |||
1130 | return err; | ||
1131 | } | ||
1132 | EXPORT_SYMBOL_GPL(evtchn_get); | ||
1133 | |||
1134 | void evtchn_put(unsigned int evtchn) | ||
1135 | { | ||
1136 | int irq = evtchn_to_irq[evtchn]; | ||
1137 | if (WARN_ON(irq == -1)) | ||
1138 | return; | ||
1139 | unbind_from_irq(irq); | ||
1140 | } | ||
1141 | EXPORT_SYMBOL_GPL(evtchn_put); | ||
1142 | |||
1068 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) | 1143 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) |
1069 | { | 1144 | { |
1070 | int irq = per_cpu(ipi_to_irq, cpu)[vector]; | 1145 | int irq = per_cpu(ipi_to_irq, cpu)[vector]; |
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index dbc13e94b612..b1f60a0c0bea 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -268,7 +268,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
268 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, | 268 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, |
269 | u->name, (void *)(unsigned long)port); | 269 | u->name, (void *)(unsigned long)port); |
270 | if (rc >= 0) | 270 | if (rc >= 0) |
271 | rc = 0; | 271 | rc = evtchn_make_refcounted(port); |
272 | 272 | ||
273 | return rc; | 273 | return rc; |
274 | } | 274 | } |
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e1c4c6e5b469..934985d14c24 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
74 | "the gntalloc device"); | 74 | "the gntalloc device"); |
75 | 75 | ||
76 | static LIST_HEAD(gref_list); | 76 | static LIST_HEAD(gref_list); |
77 | static DEFINE_SPINLOCK(gref_lock); | 77 | static DEFINE_MUTEX(gref_mutex); |
78 | static int gref_size; | 78 | static int gref_size; |
79 | 79 | ||
80 | struct notify_info { | 80 | struct notify_info { |
@@ -99,6 +99,12 @@ struct gntalloc_file_private_data {
99 | uint64_t index; | 99 | uint64_t index; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct gntalloc_vma_private_data { | ||
103 | struct gntalloc_gref *gref; | ||
104 | int users; | ||
105 | int count; | ||
106 | }; | ||
107 | |||
102 | static void __del_gref(struct gntalloc_gref *gref); | 108 | static void __del_gref(struct gntalloc_gref *gref); |
103 | 109 | ||
104 | static void do_cleanup(void) | 110 | static void do_cleanup(void) |
@@ -143,15 +149,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
143 | } | 149 | } |
144 | 150 | ||
145 | /* Add to gref lists. */ | 151 | /* Add to gref lists. */ |
146 | spin_lock(&gref_lock); | 152 | mutex_lock(&gref_mutex); |
147 | list_splice_tail(&queue_gref, &gref_list); | 153 | list_splice_tail(&queue_gref, &gref_list); |
148 | list_splice_tail(&queue_file, &priv->list); | 154 | list_splice_tail(&queue_file, &priv->list); |
149 | spin_unlock(&gref_lock); | 155 | mutex_unlock(&gref_mutex); |
150 | 156 | ||
151 | return 0; | 157 | return 0; |
152 | 158 | ||
153 | undo: | 159 | undo: |
154 | spin_lock(&gref_lock); | 160 | mutex_lock(&gref_mutex); |
155 | gref_size -= (op->count - i); | 161 | gref_size -= (op->count - i); |
156 | 162 | ||
157 | list_for_each_entry(gref, &queue_file, next_file) { | 163 | list_for_each_entry(gref, &queue_file, next_file) { |
@@ -167,7 +173,7 @@ undo:
167 | */ | 173 | */ |
168 | if (unlikely(!list_empty(&queue_gref))) | 174 | if (unlikely(!list_empty(&queue_gref))) |
169 | list_splice_tail(&queue_gref, &gref_list); | 175 | list_splice_tail(&queue_gref, &gref_list); |
170 | spin_unlock(&gref_lock); | 176 | mutex_unlock(&gref_mutex); |
171 | return rc; | 177 | return rc; |
172 | } | 178 | } |
173 | 179 | ||
@@ -178,8 +184,10 @@ static void __del_gref(struct gntalloc_gref *gref)
178 | tmp[gref->notify.pgoff] = 0; | 184 | tmp[gref->notify.pgoff] = 0; |
179 | kunmap(gref->page); | 185 | kunmap(gref->page); |
180 | } | 186 | } |
181 | if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) | 187 | if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { |
182 | notify_remote_via_evtchn(gref->notify.event); | 188 | notify_remote_via_evtchn(gref->notify.event); |
189 | evtchn_put(gref->notify.event); | ||
190 | } | ||
183 | 191 | ||
184 | gref->notify.flags = 0; | 192 | gref->notify.flags = 0; |
185 | 193 | ||
@@ -189,6 +197,8 @@ static void __del_gref(struct gntalloc_gref *gref)
189 | 197 | ||
190 | if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) | 198 | if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) |
191 | return; | 199 | return; |
200 | |||
201 | gnttab_free_grant_reference(gref->gref_id); | ||
192 | } | 202 | } |
193 | 203 | ||
194 | gref_size--; | 204 | gref_size--; |
@@ -251,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
251 | 261 | ||
252 | pr_debug("%s: priv %p\n", __func__, priv); | 262 | pr_debug("%s: priv %p\n", __func__, priv); |
253 | 263 | ||
254 | spin_lock(&gref_lock); | 264 | mutex_lock(&gref_mutex); |
255 | while (!list_empty(&priv->list)) { | 265 | while (!list_empty(&priv->list)) { |
256 | gref = list_entry(priv->list.next, | 266 | gref = list_entry(priv->list.next, |
257 | struct gntalloc_gref, next_file); | 267 | struct gntalloc_gref, next_file); |
@@ -261,7 +271,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
261 | __del_gref(gref); | 271 | __del_gref(gref); |
262 | } | 272 | } |
263 | kfree(priv); | 273 | kfree(priv); |
264 | spin_unlock(&gref_lock); | 274 | mutex_unlock(&gref_mutex); |
265 | 275 | ||
266 | return 0; | 276 | return 0; |
267 | } | 277 | } |
@@ -286,21 +296,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
286 | goto out; | 296 | goto out; |
287 | } | 297 | } |
288 | 298 | ||
289 | spin_lock(&gref_lock); | 299 | mutex_lock(&gref_mutex); |
290 | /* Clean up pages that were at zero (local) users but were still mapped | 300 | /* Clean up pages that were at zero (local) users but were still mapped |
291 | * by remote domains. Since those pages count towards the limit that we | 301 | * by remote domains. Since those pages count towards the limit that we |
292 | * are about to enforce, removing them here is a good idea. | 302 | * are about to enforce, removing them here is a good idea. |
293 | */ | 303 | */ |
294 | do_cleanup(); | 304 | do_cleanup(); |
295 | if (gref_size + op.count > limit) { | 305 | if (gref_size + op.count > limit) { |
296 | spin_unlock(&gref_lock); | 306 | mutex_unlock(&gref_mutex); |
297 | rc = -ENOSPC; | 307 | rc = -ENOSPC; |
298 | goto out_free; | 308 | goto out_free; |
299 | } | 309 | } |
300 | gref_size += op.count; | 310 | gref_size += op.count; |
301 | op.index = priv->index; | 311 | op.index = priv->index; |
302 | priv->index += op.count * PAGE_SIZE; | 312 | priv->index += op.count * PAGE_SIZE; |
303 | spin_unlock(&gref_lock); | 313 | mutex_unlock(&gref_mutex); |
304 | 314 | ||
305 | rc = add_grefs(&op, gref_ids, priv); | 315 | rc = add_grefs(&op, gref_ids, priv); |
306 | if (rc < 0) | 316 | if (rc < 0) |
@@ -343,7 +353,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
343 | goto dealloc_grant_out; | 353 | goto dealloc_grant_out; |
344 | } | 354 | } |
345 | 355 | ||
346 | spin_lock(&gref_lock); | 356 | mutex_lock(&gref_mutex); |
347 | gref = find_grefs(priv, op.index, op.count); | 357 | gref = find_grefs(priv, op.index, op.count); |
348 | if (gref) { | 358 | if (gref) { |
349 | /* Remove from the file list only, and decrease reference count. | 359 | /* Remove from the file list only, and decrease reference count. |
@@ -363,7 +373,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
363 | 373 | ||
364 | do_cleanup(); | 374 | do_cleanup(); |
365 | 375 | ||
366 | spin_unlock(&gref_lock); | 376 | mutex_unlock(&gref_mutex); |
367 | dealloc_grant_out: | 377 | dealloc_grant_out: |
368 | return rc; | 378 | return rc; |
369 | } | 379 | } |
@@ -383,7 +393,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
383 | index = op.index & ~(PAGE_SIZE - 1); | 393 | index = op.index & ~(PAGE_SIZE - 1); |
384 | pgoff = op.index & (PAGE_SIZE - 1); | 394 | pgoff = op.index & (PAGE_SIZE - 1); |
385 | 395 | ||
386 | spin_lock(&gref_lock); | 396 | mutex_lock(&gref_mutex); |
387 | 397 | ||
388 | gref = find_grefs(priv, index, 1); | 398 | gref = find_grefs(priv, index, 1); |
389 | if (!gref) { | 399 | if (!gref) { |
@@ -396,12 +406,30 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
396 | goto unlock_out; | 406 | goto unlock_out; |
397 | } | 407 | } |
398 | 408 | ||
409 | /* We need to grab a reference to the event channel we are going to use | ||
410 | * to send the notify before releasing the reference we may already have | ||
411 | * (if someone has called this ioctl twice). This is required so that | ||
412 | * it is possible to change the clear_byte part of the notification | ||
413 | * without disturbing the event channel part, which may now be the last | ||
414 | * reference to that event channel. | ||
415 | */ | ||
416 | if (op.action & UNMAP_NOTIFY_SEND_EVENT) { | ||
417 | if (evtchn_get(op.event_channel_port)) { | ||
418 | rc = -EINVAL; | ||
419 | goto unlock_out; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) | ||
424 | evtchn_put(gref->notify.event); | ||
425 | |||
399 | gref->notify.flags = op.action; | 426 | gref->notify.flags = op.action; |
400 | gref->notify.pgoff = pgoff; | 427 | gref->notify.pgoff = pgoff; |
401 | gref->notify.event = op.event_channel_port; | 428 | gref->notify.event = op.event_channel_port; |
402 | rc = 0; | 429 | rc = 0; |
430 | |||
403 | unlock_out: | 431 | unlock_out: |
404 | spin_unlock(&gref_lock); | 432 | mutex_unlock(&gref_mutex); |
405 | return rc; | 433 | return rc; |
406 | } | 434 | } |
407 | 435 | ||
@@ -429,26 +457,40 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
429 | 457 | ||
430 | static void gntalloc_vma_open(struct vm_area_struct *vma) | 458 | static void gntalloc_vma_open(struct vm_area_struct *vma) |
431 | { | 459 | { |
432 | struct gntalloc_gref *gref = vma->vm_private_data; | 460 | struct gntalloc_vma_private_data *priv = vma->vm_private_data; |
433 | if (!gref) | 461 | |
462 | if (!priv) | ||
434 | return; | 463 | return; |
435 | 464 | ||
436 | spin_lock(&gref_lock); | 465 | mutex_lock(&gref_mutex); |
437 | gref->users++; | 466 | priv->users++; |
438 | spin_unlock(&gref_lock); | 467 | mutex_unlock(&gref_mutex); |
439 | } | 468 | } |
440 | 469 | ||
441 | static void gntalloc_vma_close(struct vm_area_struct *vma) | 470 | static void gntalloc_vma_close(struct vm_area_struct *vma) |
442 | { | 471 | { |
443 | struct gntalloc_gref *gref = vma->vm_private_data; | 472 | struct gntalloc_vma_private_data *priv = vma->vm_private_data; |
444 | if (!gref) | 473 | struct gntalloc_gref *gref, *next; |
474 | int i; | ||
475 | |||
476 | if (!priv) | ||
445 | return; | 477 | return; |
446 | 478 | ||
447 | spin_lock(&gref_lock); | 479 | mutex_lock(&gref_mutex); |
448 | gref->users--; | 480 | priv->users--; |
449 | if (gref->users == 0) | 481 | if (priv->users == 0) { |
450 | __del_gref(gref); | 482 | gref = priv->gref; |
451 | spin_unlock(&gref_lock); | 483 | for (i = 0; i < priv->count; i++) { |
484 | gref->users--; | ||
485 | next = list_entry(gref->next_gref.next, | ||
486 | struct gntalloc_gref, next_gref); | ||
487 | if (gref->users == 0) | ||
488 | __del_gref(gref); | ||
489 | gref = next; | ||
490 | } | ||
491 | kfree(priv); | ||
492 | } | ||
493 | mutex_unlock(&gref_mutex); | ||
452 | } | 494 | } |
453 | 495 | ||
454 | static struct vm_operations_struct gntalloc_vmops = { | 496 | static struct vm_operations_struct gntalloc_vmops = { |
@@ -459,30 +501,41 @@ static struct vm_operations_struct gntalloc_vmops = {
459 | static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) | 501 | static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) |
460 | { | 502 | { |
461 | struct gntalloc_file_private_data *priv = filp->private_data; | 503 | struct gntalloc_file_private_data *priv = filp->private_data; |
504 | struct gntalloc_vma_private_data *vm_priv; | ||
462 | struct gntalloc_gref *gref; | 505 | struct gntalloc_gref *gref; |
463 | int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 506 | int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
464 | int rv, i; | 507 | int rv, i; |
465 | 508 | ||
466 | pr_debug("%s: priv %p, page %lu+%d\n", __func__, | ||
467 | priv, vma->vm_pgoff, count); | ||
468 | |||
469 | if (!(vma->vm_flags & VM_SHARED)) { | 509 | if (!(vma->vm_flags & VM_SHARED)) { |
470 | printk(KERN_ERR "%s: Mapping must be shared.\n", __func__); | 510 | printk(KERN_ERR "%s: Mapping must be shared.\n", __func__); |
471 | return -EINVAL; | 511 | return -EINVAL; |
472 | } | 512 | } |
473 | 513 | ||
474 | spin_lock(&gref_lock); | 514 | vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL); |
515 | if (!vm_priv) | ||
516 | return -ENOMEM; | ||
517 | |||
518 | mutex_lock(&gref_mutex); | ||
519 | |||
520 | pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__, | ||
521 | priv, vm_priv, vma->vm_pgoff, count); | ||
522 | |||
475 | gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count); | 523 | gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count); |
476 | if (gref == NULL) { | 524 | if (gref == NULL) { |
477 | rv = -ENOENT; | 525 | rv = -ENOENT; |
478 | pr_debug("%s: Could not find grant reference", | 526 | pr_debug("%s: Could not find grant reference", |
479 | __func__); | 527 | __func__); |
528 | kfree(vm_priv); | ||
480 | goto out_unlock; | 529 | goto out_unlock; |
481 | } | 530 | } |
482 | 531 | ||
483 | vma->vm_private_data = gref; | 532 | vm_priv->gref = gref; |
533 | vm_priv->users = 1; | ||
534 | vm_priv->count = count; | ||
535 | |||
536 | vma->vm_private_data = vm_priv; | ||
484 | 537 | ||
485 | vma->vm_flags |= VM_RESERVED; | 538 | vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; |
486 | 539 | ||
487 | vma->vm_ops = &gntalloc_vmops; | 540 | vma->vm_ops = &gntalloc_vmops; |
488 | 541 | ||
@@ -499,7 +552,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
499 | rv = 0; | 552 | rv = 0; |
500 | 553 | ||
501 | out_unlock: | 554 | out_unlock: |
502 | spin_unlock(&gref_lock); | 555 | mutex_unlock(&gref_mutex); |
503 | return rv; | 556 | return rv; |
504 | } | 557 | } |
505 | 558 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index afca14d9042e..99d8151c824a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,8 +193,10 @@ static void gntdev_put_map(struct grant_map *map)
193 | 193 | ||
194 | atomic_sub(map->count, &pages_mapped); | 194 | atomic_sub(map->count, &pages_mapped); |
195 | 195 | ||
196 | if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) | 196 | if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { |
197 | notify_remote_via_evtchn(map->notify.event); | 197 | notify_remote_via_evtchn(map->notify.event); |
198 | evtchn_put(map->notify.event); | ||
199 | } | ||
198 | 200 | ||
199 | if (map->pages) { | 201 | if (map->pages) { |
200 | if (!use_ptemod) | 202 | if (!use_ptemod) |
@@ -312,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
312 | } | 314 | } |
313 | } | 315 | } |
314 | 316 | ||
315 | err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages); | 317 | err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, |
318 | pages, true); | ||
316 | if (err) | 319 | if (err) |
317 | return err; | 320 | return err; |
318 | 321 | ||
@@ -599,6 +602,8 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
599 | struct ioctl_gntdev_unmap_notify op; | 602 | struct ioctl_gntdev_unmap_notify op; |
600 | struct grant_map *map; | 603 | struct grant_map *map; |
601 | int rc; | 604 | int rc; |
605 | int out_flags; | ||
606 | unsigned int out_event; | ||
602 | 607 | ||
603 | if (copy_from_user(&op, u, sizeof(op))) | 608 | if (copy_from_user(&op, u, sizeof(op))) |
604 | return -EFAULT; | 609 | return -EFAULT; |
@@ -606,6 +611,21 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
606 | if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) | 611 | if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) |
607 | return -EINVAL; | 612 | return -EINVAL; |
608 | 613 | ||
614 | /* We need to grab a reference to the event channel we are going to use | ||
615 | * to send the notify before releasing the reference we may already have | ||
616 | * (if someone has called this ioctl twice). This is required so that | ||
617 | * it is possible to change the clear_byte part of the notification | ||
618 | * without disturbing the event channel part, which may now be the last | ||
619 | * reference to that event channel. | ||
620 | */ | ||
621 | if (op.action & UNMAP_NOTIFY_SEND_EVENT) { | ||
622 | if (evtchn_get(op.event_channel_port)) | ||
623 | return -EINVAL; | ||
624 | } | ||
625 | |||
626 | out_flags = op.action; | ||
627 | out_event = op.event_channel_port; | ||
628 | |||
609 | spin_lock(&priv->lock); | 629 | spin_lock(&priv->lock); |
610 | 630 | ||
611 | list_for_each_entry(map, &priv->maps, next) { | 631 | list_for_each_entry(map, &priv->maps, next) { |
@@ -624,12 +644,22 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
624 | goto unlock_out; | 644 | goto unlock_out; |
625 | } | 645 | } |
626 | 646 | ||
647 | out_flags = map->notify.flags; | ||
648 | out_event = map->notify.event; | ||
649 | |||
627 | map->notify.flags = op.action; | 650 | map->notify.flags = op.action; |
628 | map->notify.addr = op.index - (map->index << PAGE_SHIFT); | 651 | map->notify.addr = op.index - (map->index << PAGE_SHIFT); |
629 | map->notify.event = op.event_channel_port; | 652 | map->notify.event = op.event_channel_port; |
653 | |||
630 | rc = 0; | 654 | rc = 0; |
655 | |||
631 | unlock_out: | 656 | unlock_out: |
632 | spin_unlock(&priv->lock); | 657 | spin_unlock(&priv->lock); |
658 | |||
659 | /* Drop the reference to the event channel we did not save in the map */ | ||
660 | if (out_flags & UNMAP_NOTIFY_SEND_EVENT) | ||
661 | evtchn_put(out_event); | ||
662 | |||
633 | return rc; | 663 | return rc; |
634 | } | 664 | } |
635 | 665 | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bf1c094f4ebf..1cd94daa71db 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -44,16 +44,19 @@
44 | #include <xen/page.h> | 44 | #include <xen/page.h> |
45 | #include <xen/grant_table.h> | 45 | #include <xen/grant_table.h> |
46 | #include <xen/interface/memory.h> | 46 | #include <xen/interface/memory.h> |
47 | #include <xen/hvc-console.h> | ||
47 | #include <asm/xen/hypercall.h> | 48 | #include <asm/xen/hypercall.h> |
48 | 49 | ||
49 | #include <asm/pgtable.h> | 50 | #include <asm/pgtable.h> |
50 | #include <asm/sync_bitops.h> | 51 | #include <asm/sync_bitops.h> |
51 | 52 | ||
52 | |||
53 | /* External tools reserve first few grant table entries. */ | 53 | /* External tools reserve first few grant table entries. */ |
54 | #define NR_RESERVED_ENTRIES 8 | 54 | #define NR_RESERVED_ENTRIES 8 |
55 | #define GNTTAB_LIST_END 0xffffffff | 55 | #define GNTTAB_LIST_END 0xffffffff |
56 | #define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry)) | 56 | #define GREFS_PER_GRANT_FRAME \ |
57 | (grant_table_version == 1 ? \ | ||
58 | (PAGE_SIZE / sizeof(struct grant_entry_v1)) : \ | ||
59 | (PAGE_SIZE / sizeof(union grant_entry_v2))) | ||
57 | 60 | ||
58 | static grant_ref_t **gnttab_list; | 61 | static grant_ref_t **gnttab_list; |
59 | static unsigned int nr_grant_frames; | 62 | static unsigned int nr_grant_frames; |
@@ -64,13 +67,97 @@ static DEFINE_SPINLOCK(gnttab_list_lock);
64 | unsigned long xen_hvm_resume_frames; | 67 | unsigned long xen_hvm_resume_frames; |
65 | EXPORT_SYMBOL_GPL(xen_hvm_resume_frames); | 68 | EXPORT_SYMBOL_GPL(xen_hvm_resume_frames); |
66 | 69 | ||
67 | static struct grant_entry *shared; | 70 | static union { |
71 | struct grant_entry_v1 *v1; | ||
72 | union grant_entry_v2 *v2; | ||
73 | void *addr; | ||
74 | } gnttab_shared; | ||
75 | |||
76 | /*This is a structure of function pointers for grant table*/ | ||
77 | struct gnttab_ops { | ||
78 | /* | ||
79 | * Mapping a list of frames for storing grant entries. Frames parameter | ||
80 | * is used to store grant table address when grant table being setup, | ||
81 | * nr_gframes is the number of frames to map grant table. Returning | ||
82 | * GNTST_okay means success and negative value means failure. | ||
83 | */ | ||
84 | int (*map_frames)(unsigned long *frames, unsigned int nr_gframes); | ||
85 | /* | ||
86 | * Release a list of frames which are mapped in map_frames for grant | ||
87 | * entry status. | ||
88 | */ | ||
89 | void (*unmap_frames)(void); | ||
90 | /* | ||
91 | * Introducing a valid entry into the grant table, granting the frame of | ||
92 | * this grant entry to domain for accessing or transfering. Ref | ||
93 | * parameter is reference of this introduced grant entry, domid is id of | ||
94 | * granted domain, frame is the page frame to be granted, and flags is | ||
95 | * status of the grant entry to be updated. | ||
96 | */ | ||
97 | void (*update_entry)(grant_ref_t ref, domid_t domid, | ||
98 | unsigned long frame, unsigned flags); | ||
99 | /* | ||
100 | * Stop granting a grant entry to domain for accessing. Ref parameter is | ||
101 | * reference of a grant entry whose grant access will be stopped, | ||
102 | * readonly is not in use in this function. If the grant entry is | ||
103 | * currently mapped for reading or writing, just return failure(==0) | ||
104 | * directly and don't tear down the grant access. Otherwise, stop grant | ||
105 | * access for this entry and return success(==1). | ||
106 | */ | ||
107 | int (*end_foreign_access_ref)(grant_ref_t ref, int readonly); | ||
108 | /* | ||
109 | * Stop granting a grant entry to domain for transfer. Ref parameter is | ||
110 | * reference of a grant entry whose grant transfer will be stopped. If | ||
111 | * tranfer has not started, just reclaim the grant entry and return | ||
112 | * failure(==0). Otherwise, wait for the transfer to complete and then | ||
113 | * return the frame. | ||
114 | */ | ||
115 | unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); | ||
116 | /* | ||
117 | * Query the status of a grant entry. Ref parameter is reference of | ||
118 | * queried grant entry, return value is the status of queried entry. | ||
119 | * Detailed status(writing/reading) can be gotten from the return value | ||
120 | * by bit operations. | ||
121 | */ | ||
122 | int (*query_foreign_access)(grant_ref_t ref); | ||
123 | /* | ||
124 | * Grant a domain to access a range of bytes within the page referred by | ||
125 | * an available grant entry. Ref parameter is reference of a grant entry | ||
126 | * which will be sub-page accessed, domid is id of grantee domain, frame | ||
127 | * is frame address of subpage grant, flags is grant type and flag | ||
128 | * information, page_off is offset of the range of bytes, and length is | ||
129 | * length of bytes to be accessed. | ||
130 | */ | ||
131 | void (*update_subpage_entry)(grant_ref_t ref, domid_t domid, | ||
132 | unsigned long frame, int flags, | ||
133 | unsigned page_off, unsigned length); | ||
134 | /* | ||
135 | * Redirect an available grant entry on domain A to another grant | ||
136 | * reference of domain B, then allow domain C to use grant reference | ||
137 | * of domain B transitively. Ref parameter is an available grant entry | ||
138 | * reference on domain A, domid is id of domain C which accesses grant | ||
139 | * entry transitively, flags is grant type and flag information, | ||
140 | * trans_domid is id of domain B whose grant entry is finally accessed | ||
141 | * transitively, trans_gref is grant entry transitive reference of | ||
142 | * domain B. | ||
143 | */ | ||
144 | void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags, | ||
145 | domid_t trans_domid, grant_ref_t trans_gref); | ||
146 | }; | ||
147 | |||
148 | static struct gnttab_ops *gnttab_interface; | ||
149 | |||
150 | /*This reflects status of grant entries, so act as a global value*/ | ||
151 | static grant_status_t *grstatus; | ||
152 | |||
153 | static int grant_table_version; | ||
68 | 154 | ||
69 | static struct gnttab_free_callback *gnttab_free_callback_list; | 155 | static struct gnttab_free_callback *gnttab_free_callback_list; |
70 | 156 | ||
71 | static int gnttab_expand(unsigned int req_entries); | 157 | static int gnttab_expand(unsigned int req_entries); |
72 | 158 | ||
73 | #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) | 159 | #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) |
160 | #define SPP (PAGE_SIZE / sizeof(grant_status_t)) | ||
74 | 161 | ||
75 | static inline grant_ref_t *__gnttab_entry(grant_ref_t entry) | 162 | static inline grant_ref_t *__gnttab_entry(grant_ref_t entry) |
76 | { | 163 | { |
@@ -142,23 +229,33 @@ static void put_free_entry(grant_ref_t ref)
142 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | 229 | spin_unlock_irqrestore(&gnttab_list_lock, flags); |
143 | } | 230 | } |
144 | 231 | ||
145 | static void update_grant_entry(grant_ref_t ref, domid_t domid, | 232 | /* |
146 | unsigned long frame, unsigned flags) | 233 | * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. |
234 | * Introducing a valid entry into the grant table: | ||
235 | * 1. Write ent->domid. | ||
236 | * 2. Write ent->frame: | ||
237 | * GTF_permit_access: Frame to which access is permitted. | ||
238 | * GTF_accept_transfer: Pseudo-phys frame slot being filled by new | ||
239 | * frame, or zero if none. | ||
240 | * 3. Write memory barrier (WMB). | ||
241 | * 4. Write ent->flags, inc. valid type. | ||
242 | */ | ||
243 | static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid, | ||
244 | unsigned long frame, unsigned flags) | ||
245 | { | ||
246 | gnttab_shared.v1[ref].domid = domid; | ||
247 | gnttab_shared.v1[ref].frame = frame; | ||
248 | wmb(); | ||
249 | gnttab_shared.v1[ref].flags = flags; | ||
250 | } | ||
251 | |||
252 | static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid, | ||
253 | unsigned long frame, unsigned flags) | ||
147 | { | 254 | { |
148 | /* | 255 | gnttab_shared.v2[ref].hdr.domid = domid; |
149 | * Introducing a valid entry into the grant table: | 256 | gnttab_shared.v2[ref].full_page.frame = frame; |
150 | * 1. Write ent->domid. | ||
151 | * 2. Write ent->frame: | ||
152 | * GTF_permit_access: Frame to which access is permitted. | ||
153 | * GTF_accept_transfer: Pseudo-phys frame slot being filled by new | ||
154 | * frame, or zero if none. | ||
155 | * 3. Write memory barrier (WMB). | ||
156 | * 4. Write ent->flags, inc. valid type. | ||
157 | */ | ||
158 | shared[ref].frame = frame; | ||
159 | shared[ref].domid = domid; | ||
160 | wmb(); | 257 | wmb(); |
161 | shared[ref].flags = flags; | 258 | gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags; |
162 | } | 259 | } |
163 | 260 | ||
164 | /* | 261 | /* |
@@ -167,7 +264,7 @@ static void update_grant_entry(grant_ref_t ref, domid_t domid,
167 | void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, | 264 | void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, |
168 | unsigned long frame, int readonly) | 265 | unsigned long frame, int readonly) |
169 | { | 266 | { |
170 | update_grant_entry(ref, domid, frame, | 267 | gnttab_interface->update_entry(ref, domid, frame, |
171 | GTF_permit_access | (readonly ? GTF_readonly : 0)); | 268 | GTF_permit_access | (readonly ? GTF_readonly : 0)); |
172 | } | 269 | } |
173 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); | 270 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); |
@@ -187,31 +284,184 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
187 | } | 284 | } |
188 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); | 285 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); |
189 | 286 | ||
190 | int gnttab_query_foreign_access(grant_ref_t ref) | 287 | void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid, |
288 | unsigned long frame, int flags, | ||
289 | unsigned page_off, | ||
290 | unsigned length) | ||
291 | { | ||
292 | gnttab_shared.v2[ref].sub_page.frame = frame; | ||
293 | gnttab_shared.v2[ref].sub_page.page_off = page_off; | ||
294 | gnttab_shared.v2[ref].sub_page.length = length; | ||
295 | gnttab_shared.v2[ref].hdr.domid = domid; | ||
296 | wmb(); | ||
297 | gnttab_shared.v2[ref].hdr.flags = | ||
298 | GTF_permit_access | GTF_sub_page | flags; | ||
299 | } | ||
300 | |||
301 | int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, | ||
302 | unsigned long frame, int flags, | ||
303 | unsigned page_off, | ||
304 | unsigned length) | ||
191 | { | 305 | { |
192 | u16 nflags; | 306 | if (flags & (GTF_accept_transfer | GTF_reading | |
307 | GTF_writing | GTF_transitive)) | ||
308 | return -EPERM; | ||
193 | 309 | ||
194 | nflags = shared[ref].flags; | 310 | if (gnttab_interface->update_subpage_entry == NULL) |
311 | return -ENOSYS; | ||
195 | 312 | ||
196 | return nflags & (GTF_reading|GTF_writing); | 313 | gnttab_interface->update_subpage_entry(ref, domid, frame, flags, |
314 | page_off, length); | ||
315 | |||
316 | return 0; | ||
317 | } | ||
318 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref); | ||
319 | |||
320 | int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, | ||
321 | int flags, unsigned page_off, | ||
322 | unsigned length) | ||
323 | { | ||
324 | int ref, rc; | ||
325 | |||
326 | ref = get_free_entries(1); | ||
327 | if (unlikely(ref < 0)) | ||
328 | return -ENOSPC; | ||
329 | |||
330 | rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags, | ||
331 | page_off, length); | ||
332 | if (rc < 0) { | ||
333 | put_free_entry(ref); | ||
334 | return rc; | ||
335 | } | ||
336 | |||
337 | return ref; | ||
338 | } | ||
339 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage); | ||
340 | |||
341 | bool gnttab_subpage_grants_available(void) | ||
342 | { | ||
343 | return gnttab_interface->update_subpage_entry != NULL; | ||
344 | } | ||
345 | EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available); | ||
346 | |||
347 | void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid, | ||
348 | int flags, domid_t trans_domid, | ||
349 | grant_ref_t trans_gref) | ||
350 | { | ||
351 | gnttab_shared.v2[ref].transitive.trans_domid = trans_domid; | ||
352 | gnttab_shared.v2[ref].transitive.gref = trans_gref; | ||
353 | gnttab_shared.v2[ref].hdr.domid = domid; | ||
354 | wmb(); | ||
355 | gnttab_shared.v2[ref].hdr.flags = | ||
356 | GTF_permit_access | GTF_transitive | flags; | ||
357 | } | ||
358 | |||
359 | int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, | ||
360 | int flags, domid_t trans_domid, | ||
361 | grant_ref_t trans_gref) | ||
362 | { | ||
363 | if (flags & (GTF_accept_transfer | GTF_reading | | ||
364 | GTF_writing | GTF_sub_page)) | ||
365 | return -EPERM; | ||
366 | |||
367 | if (gnttab_interface->update_trans_entry == NULL) | ||
368 | return -ENOSYS; | ||
369 | |||
370 | gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid, | ||
371 | trans_gref); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref); | ||
376 | |||
377 | int gnttab_grant_foreign_access_trans(domid_t domid, int flags, | ||
378 | domid_t trans_domid, | ||
379 | grant_ref_t trans_gref) | ||
380 | { | ||
381 | int ref, rc; | ||
382 | |||
383 | ref = get_free_entries(1); | ||
384 | if (unlikely(ref < 0)) | ||
385 | return -ENOSPC; | ||
386 | |||
387 | rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags, | ||
388 | trans_domid, trans_gref); | ||
389 | if (rc < 0) { | ||
390 | put_free_entry(ref); | ||
391 | return rc; | ||
392 | } | ||
393 | |||
394 | return ref; | ||
395 | } | ||
396 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans); | ||
397 | |||
398 | bool gnttab_trans_grants_available(void) | ||
399 | { | ||
400 | return gnttab_interface->update_trans_entry != NULL; | ||
401 | } | ||
402 | EXPORT_SYMBOL_GPL(gnttab_trans_grants_available); | ||
403 | |||
404 | static int gnttab_query_foreign_access_v1(grant_ref_t ref) | ||
405 | { | ||
406 | return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); | ||
407 | } | ||
408 | |||
409 | static int gnttab_query_foreign_access_v2(grant_ref_t ref) | ||
410 | { | ||
411 | return grstatus[ref] & (GTF_reading|GTF_writing); | ||
412 | } | ||
413 | |||
414 | int gnttab_query_foreign_access(grant_ref_t ref) | ||
415 | { | ||
416 | return gnttab_interface->query_foreign_access(ref); | ||
197 | } | 417 | } |
198 | EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); | 418 | EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); |
199 | 419 | ||
200 | int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) | 420 | static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) |
201 | { | 421 | { |
202 | u16 flags, nflags; | 422 | u16 flags, nflags; |
423 | u16 *pflags; | ||
203 | 424 | ||
204 | nflags = shared[ref].flags; | 425 | pflags = &gnttab_shared.v1[ref].flags; |
426 | nflags = *pflags; | ||
205 | do { | 427 | do { |
206 | flags = nflags; | 428 | flags = nflags; |
207 | if (flags & (GTF_reading|GTF_writing)) { | 429 | if (flags & (GTF_reading|GTF_writing)) { |
208 | printk(KERN_ALERT "WARNING: g.e. still in use!\n"); | 430 | printk(KERN_ALERT "WARNING: g.e. still in use!\n"); |
209 | return 0; | 431 | return 0; |
210 | } | 432 | } |
211 | } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags); | 433 | } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags); |
434 | |||
435 | return 1; | ||
436 | } | ||
437 | |||
438 | static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly) | ||
439 | { | ||
440 | gnttab_shared.v2[ref].hdr.flags = 0; | ||
441 | mb(); | ||
442 | if (grstatus[ref] & (GTF_reading|GTF_writing)) { | ||
443 | return 0; | ||
444 | } else { | ||
445 | /* The read of grstatus needs to have acquire | ||
446 | semantics. On x86, reads already have | ||
447 | that, and we just need to protect against | ||
448 | compiler reorderings. On other | ||
449 | architectures we may need a full | ||
450 | barrier. */ | ||
451 | #ifdef CONFIG_X86 | ||
452 | barrier(); | ||
453 | #else | ||
454 | mb(); | ||
455 | #endif | ||
456 | } | ||
212 | 457 | ||
213 | return 1; | 458 | return 1; |
214 | } | 459 | } |
460 | |||
461 | int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) | ||
462 | { | ||
463 | return gnttab_interface->end_foreign_access_ref(ref, readonly); | ||
464 | } | ||
215 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); | 465 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); |
216 | 466 | ||
217 | void gnttab_end_foreign_access(grant_ref_t ref, int readonly, | 467 | void gnttab_end_foreign_access(grant_ref_t ref, int readonly, |
@@ -246,37 +496,76 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
246 | void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, | 496 | void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, |
247 | unsigned long pfn) | 497 | unsigned long pfn) |
248 | { | 498 | { |
249 | update_grant_entry(ref, domid, pfn, GTF_accept_transfer); | 499 | gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer); |
250 | } | 500 | } |
251 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); | 501 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); |
252 | 502 | ||
253 | unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) | 503 | static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref) |
254 | { | 504 | { |
255 | unsigned long frame; | 505 | unsigned long frame; |
256 | u16 flags; | 506 | u16 flags; |
507 | u16 *pflags; | ||
508 | |||
509 | pflags = &gnttab_shared.v1[ref].flags; | ||
257 | 510 | ||
258 | /* | 511 | /* |
259 | * If a transfer is not even yet started, try to reclaim the grant | 512 | * If a transfer is not even yet started, try to reclaim the grant |
260 | * reference and return failure (== 0). | 513 | * reference and return failure (== 0). |
261 | */ | 514 | */ |
262 | while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { | 515 | while (!((flags = *pflags) & GTF_transfer_committed)) { |
263 | if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags) | 516 | if (sync_cmpxchg(pflags, flags, 0) == flags) |
264 | return 0; | 517 | return 0; |
265 | cpu_relax(); | 518 | cpu_relax(); |
266 | } | 519 | } |
267 | 520 | ||
268 | /* If a transfer is in progress then wait until it is completed. */ | 521 | /* If a transfer is in progress then wait until it is completed. */ |
269 | while (!(flags & GTF_transfer_completed)) { | 522 | while (!(flags & GTF_transfer_completed)) { |
270 | flags = shared[ref].flags; | 523 | flags = *pflags; |
271 | cpu_relax(); | 524 | cpu_relax(); |
272 | } | 525 | } |
273 | 526 | ||
274 | rmb(); /* Read the frame number /after/ reading completion status. */ | 527 | rmb(); /* Read the frame number /after/ reading completion status. */ |
275 | frame = shared[ref].frame; | 528 | frame = gnttab_shared.v1[ref].frame; |
529 | BUG_ON(frame == 0); | ||
530 | |||
531 | return frame; | ||
532 | } | ||
533 | |||
534 | static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref) | ||
535 | { | ||
536 | unsigned long frame; | ||
537 | u16 flags; | ||
538 | u16 *pflags; | ||
539 | |||
540 | pflags = &gnttab_shared.v2[ref].hdr.flags; | ||
541 | |||
542 | /* | ||
543 | * If a transfer is not even yet started, try to reclaim the grant | ||
544 | * reference and return failure (== 0). | ||
545 | */ | ||
546 | while (!((flags = *pflags) & GTF_transfer_committed)) { | ||
547 | if (sync_cmpxchg(pflags, flags, 0) == flags) | ||
548 | return 0; | ||
549 | cpu_relax(); | ||
550 | } | ||
551 | |||
552 | /* If a transfer is in progress then wait until it is completed. */ | ||
553 | while (!(flags & GTF_transfer_completed)) { | ||
554 | flags = *pflags; | ||
555 | cpu_relax(); | ||
556 | } | ||
557 | |||
558 | rmb(); /* Read the frame number /after/ reading completion status. */ | ||
559 | frame = gnttab_shared.v2[ref].full_page.frame; | ||
276 | BUG_ON(frame == 0); | 560 | BUG_ON(frame == 0); |
277 | 561 | ||
278 | return frame; | 562 | return frame; |
279 | } | 563 | } |
564 | |||
565 | unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) | ||
566 | { | ||
567 | return gnttab_interface->end_foreign_transfer_ref(ref); | ||
568 | } | ||
280 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); | 569 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); |
281 | 570 | ||
282 | unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) | 571 | unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) |
@@ -448,8 +737,8 @@ unsigned int gnttab_max_grant_frames(void)
448 | EXPORT_SYMBOL_GPL(gnttab_max_grant_frames); | 737 | EXPORT_SYMBOL_GPL(gnttab_max_grant_frames); |
449 | 738 | ||
450 | int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | 739 | int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, |
451 | struct gnttab_map_grant_ref *kmap_ops, | 740 | struct gnttab_map_grant_ref *kmap_ops, |
452 | struct page **pages, unsigned int count) | 741 | struct page **pages, unsigned int count) |
453 | { | 742 | { |
454 | int i, ret; | 743 | int i, ret; |
455 | pte_t *pte; | 744 | pte_t *pte; |
@@ -472,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
472 | (map_ops[i].host_addr & ~PAGE_MASK)); | 761 | (map_ops[i].host_addr & ~PAGE_MASK)); |
473 | mfn = pte_mfn(*pte); | 762 | mfn = pte_mfn(*pte); |
474 | } else { | 763 | } else { |
475 | /* If you really wanted to do this: | 764 | mfn = PFN_DOWN(map_ops[i].dev_bus_addr); |
476 | * mfn = PFN_DOWN(map_ops[i].dev_bus_addr); | ||
477 | * | ||
478 | * The reason we do not implement it is b/c on the | ||
479 | * unmap path (gnttab_unmap_refs) we have no means of | ||
480 | * checking whether the page is !GNTMAP_contains_pte. | ||
481 | * | ||
482 | * That is without some extra data-structure to carry | ||
483 | * the struct page, bool clear_pte, and list_head next | ||
484 | * tuples and deal with allocation/delallocation, etc. | ||
485 | * | ||
486 | * The users of this API set the GNTMAP_contains_pte | ||
487 | * flag so lets just return not supported until it | ||
488 | * becomes neccessary to implement. | ||
489 | */ | ||
490 | return -EOPNOTSUPP; | ||
491 | } | 765 | } |
492 | ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]); | 766 | ret = m2p_add_override(mfn, pages[i], kmap_ops ? |
767 | &kmap_ops[i] : NULL); | ||
493 | if (ret) | 768 | if (ret) |
494 | return ret; | 769 | return ret; |
495 | } | 770 | } |
@@ -499,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
499 | EXPORT_SYMBOL_GPL(gnttab_map_refs); | 774 | EXPORT_SYMBOL_GPL(gnttab_map_refs); |
500 | 775 | ||
501 | int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | 776 | int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, |
502 | struct page **pages, unsigned int count) | 777 | struct page **pages, unsigned int count, bool clear_pte) |
503 | { | 778 | { |
504 | int i, ret; | 779 | int i, ret; |
505 | 780 | ||
@@ -511,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
511 | return ret; | 786 | return ret; |
512 | 787 | ||
513 | for (i = 0; i < count; i++) { | 788 | for (i = 0; i < count; i++) { |
514 | ret = m2p_remove_override(pages[i], true /* clear the PTE */); | 789 | ret = m2p_remove_override(pages[i], clear_pte); |
515 | if (ret) | 790 | if (ret) |
516 | return ret; | 791 | return ret; |
517 | } | 792 | } |
@@ -520,6 +795,77 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
520 | } | 795 | } |
521 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs); | 796 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs); |
522 | 797 | ||
798 | static unsigned nr_status_frames(unsigned nr_grant_frames) | ||
799 | { | ||
800 | return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; | ||
801 | } | ||
802 | |||
803 | static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes) | ||
804 | { | ||
805 | int rc; | ||
806 | |||
807 | rc = arch_gnttab_map_shared(frames, nr_gframes, | ||
808 | gnttab_max_grant_frames(), | ||
809 | &gnttab_shared.addr); | ||
810 | BUG_ON(rc); | ||
811 | |||
812 | return 0; | ||
813 | } | ||
814 | |||
815 | static void gnttab_unmap_frames_v1(void) | ||
816 | { | ||
817 | arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); | ||
818 | } | ||
819 | |||
820 | static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes) | ||
821 | { | ||
822 | uint64_t *sframes; | ||
823 | unsigned int nr_sframes; | ||
824 | struct gnttab_get_status_frames getframes; | ||
825 | int rc; | ||
826 | |||
827 | nr_sframes = nr_status_frames(nr_gframes); | ||
828 | |||
829 | /* No need for kzalloc as it is initialized in following hypercall | ||
830 | * GNTTABOP_get_status_frames. | ||
831 | */ | ||
832 | sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC); | ||
833 | if (!sframes) | ||
834 | return -ENOMEM; | ||
835 | |||
836 | getframes.dom = DOMID_SELF; | ||
837 | getframes.nr_frames = nr_sframes; | ||
838 | set_xen_guest_handle(getframes.frame_list, sframes); | ||
839 | |||
840 | rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames, | ||
841 | &getframes, 1); | ||
842 | if (rc == -ENOSYS) { | ||
843 | kfree(sframes); | ||
844 | return -ENOSYS; | ||
845 | } | ||
846 | |||
847 | BUG_ON(rc || getframes.status); | ||
848 | |||
849 | rc = arch_gnttab_map_status(sframes, nr_sframes, | ||
850 | nr_status_frames(gnttab_max_grant_frames()), | ||
851 | &grstatus); | ||
852 | BUG_ON(rc); | ||
853 | kfree(sframes); | ||
854 | |||
855 | rc = arch_gnttab_map_shared(frames, nr_gframes, | ||
856 | gnttab_max_grant_frames(), | ||
857 | &gnttab_shared.addr); | ||
858 | BUG_ON(rc); | ||
859 | |||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | static void gnttab_unmap_frames_v2(void) | ||
864 | { | ||
865 | arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); | ||
866 | arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames)); | ||
867 | } | ||
868 | |||
523 | static int gnttab_map(unsigned int start_idx, unsigned int end_idx) | 869 | static int gnttab_map(unsigned int start_idx, unsigned int end_idx) |
524 | { | 870 | { |
525 | struct gnttab_setup_table setup; | 871 | struct gnttab_setup_table setup; |
@@ -551,6 +897,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
551 | return rc; | 897 | return rc; |
552 | } | 898 | } |
553 | 899 | ||
900 | /* No need for kzalloc as it is initialized in following hypercall | ||
901 | * GNTTABOP_setup_table. | ||
902 | */ | ||
554 | frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); | 903 | frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); |
555 | if (!frames) | 904 | if (!frames) |
556 | return -ENOMEM; | 905 | return -ENOMEM; |
@@ -567,19 +916,65 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
567 | 916 | ||
568 | BUG_ON(rc || setup.status); | 917 | BUG_ON(rc || setup.status); |
569 | 918 | ||
570 | rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(), | 919 | rc = gnttab_interface->map_frames(frames, nr_gframes); |
571 | &shared); | ||
572 | BUG_ON(rc); | ||
573 | 920 | ||
574 | kfree(frames); | 921 | kfree(frames); |
575 | 922 | ||
576 | return 0; | 923 | return rc; |
924 | } | ||
925 | |||
926 | static struct gnttab_ops gnttab_v1_ops = { | ||
927 | .map_frames = gnttab_map_frames_v1, | ||
928 | .unmap_frames = gnttab_unmap_frames_v1, | ||
929 | .update_entry = gnttab_update_entry_v1, | ||
930 | .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, | ||
931 | .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, | ||
932 | .query_foreign_access = gnttab_query_foreign_access_v1, | ||
933 | }; | ||
934 | |||
935 | static struct gnttab_ops gnttab_v2_ops = { | ||
936 | .map_frames = gnttab_map_frames_v2, | ||
937 | .unmap_frames = gnttab_unmap_frames_v2, | ||
938 | .update_entry = gnttab_update_entry_v2, | ||
939 | .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, | ||
940 | .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, | ||
941 | .query_foreign_access = gnttab_query_foreign_access_v2, | ||
942 | .update_subpage_entry = gnttab_update_subpage_entry_v2, | ||
943 | .update_trans_entry = gnttab_update_trans_entry_v2, | ||
944 | }; | ||
945 | |||
946 | static void gnttab_request_version(void) | ||
947 | { | ||
948 | int rc; | ||
949 | struct gnttab_set_version gsv; | ||
950 | |||
951 | gsv.version = 2; | ||
952 | rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); | ||
953 | if (rc == 0) { | ||
954 | grant_table_version = 2; | ||
955 | gnttab_interface = &gnttab_v2_ops; | ||
956 | } else if (grant_table_version == 2) { | ||
957 | /* | ||
958 | * If we've already used version 2 features, | ||
959 | * but then suddenly discover that they're not | ||
960 | * available (e.g. migrating to an older | ||
961 | * version of Xen), almost unbounded badness | ||
962 | * can happen. | ||
963 | */ | ||
964 | panic("we need grant tables version 2, but only version 1 is available"); | ||
965 | } else { | ||
966 | grant_table_version = 1; | ||
967 | gnttab_interface = &gnttab_v1_ops; | ||
968 | } | ||
969 | printk(KERN_INFO "Grant tables using version %d layout.\n", | ||
970 | grant_table_version); | ||
577 | } | 971 | } |
578 | 972 | ||
579 | int gnttab_resume(void) | 973 | int gnttab_resume(void) |
580 | { | 974 | { |
581 | unsigned int max_nr_gframes; | 975 | unsigned int max_nr_gframes; |
582 | 976 | ||
977 | gnttab_request_version(); | ||
583 | max_nr_gframes = gnttab_max_grant_frames(); | 978 | max_nr_gframes = gnttab_max_grant_frames(); |
584 | if (max_nr_gframes < nr_grant_frames) | 979 | if (max_nr_gframes < nr_grant_frames) |
585 | return -ENOSYS; | 980 | return -ENOSYS; |
@@ -587,9 +982,10 @@ int gnttab_resume(void)
587 | if (xen_pv_domain()) | 982 | if (xen_pv_domain()) |
588 | return gnttab_map(0, nr_grant_frames - 1); | 983 | return gnttab_map(0, nr_grant_frames - 1); |
589 | 984 | ||
590 | if (!shared) { | 985 | if (gnttab_shared.addr == NULL) { |
591 | shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes); | 986 | gnttab_shared.addr = ioremap(xen_hvm_resume_frames, |
592 | if (shared == NULL) { | 987 | PAGE_SIZE * max_nr_gframes); |
988 | if (gnttab_shared.addr == NULL) { | ||
593 | printk(KERN_WARNING | 989 | printk(KERN_WARNING |
594 | "Failed to ioremap gnttab share frames!"); | 990 | "Failed to ioremap gnttab share frames!"); |
595 | return -ENOMEM; | 991 | return -ENOMEM; |
@@ -603,7 +999,7 @@ int gnttab_suspend(void)
603 | 999 | ||
604 | int gnttab_suspend(void) | 1000 | int gnttab_suspend(void) |
605 | { | 1001 | { |
606 | arch_gnttab_unmap_shared(shared, nr_grant_frames); | 1002 | gnttab_interface->unmap_frames(); |
607 | return 0; | 1003 | return 0; |
608 | } | 1004 | } |
609 | 1005 | ||
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/privcmd.c
index dbd3b16fd131..ccee0f16bcf8 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -7,6 +7,7 @@
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/module.h> | ||
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
12 | #include <linux/string.h> | 13 | #include <linux/string.h> |
@@ -18,6 +19,7 @@
18 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
19 | #include <linux/pagemap.h> | 20 | #include <linux/pagemap.h> |
20 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
22 | #include <linux/miscdevice.h> | ||
21 | 23 | ||
22 | #include <asm/pgalloc.h> | 24 | #include <asm/pgalloc.h> |
23 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
@@ -32,6 +34,10 @@
32 | #include <xen/page.h> | 34 | #include <xen/page.h> |
33 | #include <xen/xen-ops.h> | 35 | #include <xen/xen-ops.h> |
34 | 36 | ||
37 | #include "privcmd.h" | ||
38 | |||
39 | MODULE_LICENSE("GPL"); | ||
40 | |||
35 | #ifndef HAVE_ARCH_PRIVCMD_MMAP | 41 | #ifndef HAVE_ARCH_PRIVCMD_MMAP |
36 | static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); | 42 | static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); |
37 | #endif | 43 | #endif |
@@ -359,7 +365,6 @@ static long privcmd_ioctl(struct file *file, | |||
359 | return ret; | 365 | return ret; |
360 | } | 366 | } |
361 | 367 | ||
362 | #ifndef HAVE_ARCH_PRIVCMD_MMAP | ||
363 | static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 368 | static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
364 | { | 369 | { |
365 | printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", | 370 | printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", |
@@ -392,9 +397,39 @@ static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma) | |||
392 | { | 397 | { |
393 | return (xchg(&vma->vm_private_data, (void *)1) == NULL); | 398 | return (xchg(&vma->vm_private_data, (void *)1) == NULL); |
394 | } | 399 | } |
395 | #endif | ||
396 | 400 | ||
397 | const struct file_operations privcmd_file_ops = { | 401 | const struct file_operations xen_privcmd_fops = { |
402 | .owner = THIS_MODULE, | ||
398 | .unlocked_ioctl = privcmd_ioctl, | 403 | .unlocked_ioctl = privcmd_ioctl, |
399 | .mmap = privcmd_mmap, | 404 | .mmap = privcmd_mmap, |
400 | }; | 405 | }; |
406 | EXPORT_SYMBOL_GPL(xen_privcmd_fops); | ||
407 | |||
408 | static struct miscdevice privcmd_dev = { | ||
409 | .minor = MISC_DYNAMIC_MINOR, | ||
410 | .name = "xen/privcmd", | ||
411 | .fops = &xen_privcmd_fops, | ||
412 | }; | ||
413 | |||
414 | static int __init privcmd_init(void) | ||
415 | { | ||
416 | int err; | ||
417 | |||
418 | if (!xen_domain()) | ||
419 | return -ENODEV; | ||
420 | |||
421 | err = misc_register(&privcmd_dev); | ||
422 | if (err != 0) { | ||
423 | printk(KERN_ERR "Could not register Xen privcmd device\n"); | ||
424 | return err; | ||
425 | } | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static void __exit privcmd_exit(void) | ||
430 | { | ||
431 | misc_deregister(&privcmd_dev); | ||
432 | } | ||
433 | |||
434 | module_init(privcmd_init); | ||
435 | module_exit(privcmd_exit); | ||
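With privcmd built as its own miscdevice, userspace can reach it at /dev/xen/privcmd as well as through the xenfs file. A small userspace sketch of opening it, assuming the usual xenfs mount at /proc/xen for the fallback path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Prefer the new miscdevice node, fall back to the legacy xenfs file. */
    static int open_privcmd(void)
    {
            int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);

            if (fd < 0)
                    fd = open("/proc/xen/privcmd", O_RDWR | O_CLOEXEC);
            return fd;
    }

    int main(void)
    {
            int fd = open_privcmd();

            if (fd < 0) {
                    perror("privcmd");
                    return 1;
            }
            printf("privcmd opened, fd=%d\n", fd);
            close(fd);
            return 0;
    }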
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h new file mode 100644 index 000000000000..14facaeed36f --- /dev/null +++ b/drivers/xen/privcmd.h | |||
@@ -0,0 +1,3 @@ | |||
1 | #include <linux/fs.h> | ||
2 | |||
3 | extern const struct file_operations xen_privcmd_fops; | ||
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 9cc2259c9992..3832e303c33a 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/sysdev.h> | ||
36 | #include <linux/capability.h> | 35 | #include <linux/capability.h> |
37 | 36 | ||
38 | #include <xen/xen.h> | 37 | #include <xen/xen.h> |
@@ -46,9 +45,9 @@ | |||
46 | 45 | ||
47 | #define BALLOON_CLASS_NAME "xen_memory" | 46 | #define BALLOON_CLASS_NAME "xen_memory" |
48 | 47 | ||
49 | static struct sys_device balloon_sysdev; | 48 | static struct device balloon_dev; |
50 | 49 | ||
51 | static int register_balloon(struct sys_device *sysdev); | 50 | static int register_balloon(struct device *dev); |
52 | 51 | ||
53 | /* React to a change in the target key */ | 52 | /* React to a change in the target key */ |
54 | static void watch_target(struct xenbus_watch *watch, | 53 | static void watch_target(struct xenbus_watch *watch, |
@@ -98,9 +97,9 @@ static int __init balloon_init(void) | |||
98 | 97 | ||
99 | pr_info("xen-balloon: Initialising balloon driver.\n"); | 98 | pr_info("xen-balloon: Initialising balloon driver.\n"); |
100 | 99 | ||
101 | register_balloon(&balloon_sysdev); | 100 | register_balloon(&balloon_dev); |
102 | 101 | ||
103 | register_xen_selfballooning(&balloon_sysdev); | 102 | register_xen_selfballooning(&balloon_dev); |
104 | 103 | ||
105 | register_xenstore_notifier(&xenstore_notifier); | 104 | register_xenstore_notifier(&xenstore_notifier); |
106 | 105 | ||
@@ -117,31 +116,31 @@ static void balloon_exit(void) | |||
117 | module_exit(balloon_exit); | 116 | module_exit(balloon_exit); |
118 | 117 | ||
119 | #define BALLOON_SHOW(name, format, args...) \ | 118 | #define BALLOON_SHOW(name, format, args...) \ |
120 | static ssize_t show_##name(struct sys_device *dev, \ | 119 | static ssize_t show_##name(struct device *dev, \ |
121 | struct sysdev_attribute *attr, \ | 120 | struct device_attribute *attr, \ |
122 | char *buf) \ | 121 | char *buf) \ |
123 | { \ | 122 | { \ |
124 | return sprintf(buf, format, ##args); \ | 123 | return sprintf(buf, format, ##args); \ |
125 | } \ | 124 | } \ |
126 | static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) | 125 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) |
127 | 126 | ||
128 | BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); | 127 | BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); |
129 | BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); | 128 | BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); |
130 | BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); | 129 | BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); |
131 | 130 | ||
132 | static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay); | 131 | static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay); |
133 | static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); | 132 | static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); |
134 | static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); | 133 | static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); |
135 | static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); | 134 | static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); |
136 | 135 | ||
137 | static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, | 136 | static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, |
138 | char *buf) | 137 | char *buf) |
139 | { | 138 | { |
140 | return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages)); | 139 | return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages)); |
141 | } | 140 | } |
142 | 141 | ||
143 | static ssize_t store_target_kb(struct sys_device *dev, | 142 | static ssize_t store_target_kb(struct device *dev, |
144 | struct sysdev_attribute *attr, | 143 | struct device_attribute *attr, |
145 | const char *buf, | 144 | const char *buf, |
146 | size_t count) | 145 | size_t count) |
147 | { | 146 | { |
@@ -158,11 +157,11 @@ static ssize_t store_target_kb(struct sys_device *dev, | |||
158 | return count; | 157 | return count; |
159 | } | 158 | } |
160 | 159 | ||
161 | static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, | 160 | static DEVICE_ATTR(target_kb, S_IRUGO | S_IWUSR, |
162 | show_target_kb, store_target_kb); | 161 | show_target_kb, store_target_kb); |
163 | 162 | ||
164 | 163 | ||
165 | static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, | 164 | static ssize_t show_target(struct device *dev, struct device_attribute *attr, |
166 | char *buf) | 165 | char *buf) |
167 | { | 166 | { |
168 | return sprintf(buf, "%llu\n", | 167 | return sprintf(buf, "%llu\n", |
@@ -170,8 +169,8 @@ static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr | |||
170 | << PAGE_SHIFT); | 169 | << PAGE_SHIFT); |
171 | } | 170 | } |
172 | 171 | ||
173 | static ssize_t store_target(struct sys_device *dev, | 172 | static ssize_t store_target(struct device *dev, |
174 | struct sysdev_attribute *attr, | 173 | struct device_attribute *attr, |
175 | const char *buf, | 174 | const char *buf, |
176 | size_t count) | 175 | size_t count) |
177 | { | 176 | { |
@@ -188,23 +187,23 @@ static ssize_t store_target(struct sys_device *dev, | |||
188 | return count; | 187 | return count; |
189 | } | 188 | } |
190 | 189 | ||
191 | static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, | 190 | static DEVICE_ATTR(target, S_IRUGO | S_IWUSR, |
192 | show_target, store_target); | 191 | show_target, store_target); |
193 | 192 | ||
194 | 193 | ||
195 | static struct sysdev_attribute *balloon_attrs[] = { | 194 | static struct device_attribute *balloon_attrs[] = { |
196 | &attr_target_kb, | 195 | &dev_attr_target_kb, |
197 | &attr_target, | 196 | &dev_attr_target, |
198 | &attr_schedule_delay.attr, | 197 | &dev_attr_schedule_delay.attr, |
199 | &attr_max_schedule_delay.attr, | 198 | &dev_attr_max_schedule_delay.attr, |
200 | &attr_retry_count.attr, | 199 | &dev_attr_retry_count.attr, |
201 | &attr_max_retry_count.attr | 200 | &dev_attr_max_retry_count.attr |
202 | }; | 201 | }; |
203 | 202 | ||
204 | static struct attribute *balloon_info_attrs[] = { | 203 | static struct attribute *balloon_info_attrs[] = { |
205 | &attr_current_kb.attr, | 204 | &dev_attr_current_kb.attr, |
206 | &attr_low_kb.attr, | 205 | &dev_attr_low_kb.attr, |
207 | &attr_high_kb.attr, | 206 | &dev_attr_high_kb.attr, |
208 | NULL | 207 | NULL |
209 | }; | 208 | }; |
210 | 209 | ||
@@ -213,34 +212,35 @@ static struct attribute_group balloon_info_group = { | |||
213 | .attrs = balloon_info_attrs | 212 | .attrs = balloon_info_attrs |
214 | }; | 213 | }; |
215 | 214 | ||
216 | static struct sysdev_class balloon_sysdev_class = { | 215 | static struct bus_type balloon_subsys = { |
217 | .name = BALLOON_CLASS_NAME | 216 | .name = BALLOON_CLASS_NAME, |
217 | .dev_name = BALLOON_CLASS_NAME, | ||
218 | }; | 218 | }; |
219 | 219 | ||
220 | static int register_balloon(struct sys_device *sysdev) | 220 | static int register_balloon(struct device *dev) |
221 | { | 221 | { |
222 | int i, error; | 222 | int i, error; |
223 | 223 | ||
224 | error = sysdev_class_register(&balloon_sysdev_class); | 224 | error = bus_register(&balloon_subsys); |
225 | if (error) | 225 | if (error) |
226 | return error; | 226 | return error; |
227 | 227 | ||
228 | sysdev->id = 0; | 228 | dev->id = 0; |
229 | sysdev->cls = &balloon_sysdev_class; | 229 | dev->bus = &balloon_subsys; |
230 | 230 | ||
231 | error = sysdev_register(sysdev); | 231 | error = device_register(dev); |
232 | if (error) { | 232 | if (error) { |
233 | sysdev_class_unregister(&balloon_sysdev_class); | 233 | bus_unregister(&balloon_subsys); |
234 | return error; | 234 | return error; |
235 | } | 235 | } |
236 | 236 | ||
237 | for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { | 237 | for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { |
238 | error = sysdev_create_file(sysdev, balloon_attrs[i]); | 238 | error = device_create_file(dev, balloon_attrs[i]); |
239 | if (error) | 239 | if (error) |
240 | goto fail; | 240 | goto fail; |
241 | } | 241 | } |
242 | 242 | ||
243 | error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); | 243 | error = sysfs_create_group(&dev->kobj, &balloon_info_group); |
244 | if (error) | 244 | if (error) |
245 | goto fail; | 245 | goto fail; |
246 | 246 | ||
@@ -248,9 +248,9 @@ static int register_balloon(struct sys_device *sysdev) | |||
248 | 248 | ||
249 | fail: | 249 | fail: |
250 | while (--i >= 0) | 250 | while (--i >= 0) |
251 | sysdev_remove_file(sysdev, balloon_attrs[i]); | 251 | device_remove_file(dev, balloon_attrs[i]); |
252 | sysdev_unregister(sysdev); | 252 | device_unregister(dev); |
253 | sysdev_class_unregister(&balloon_sysdev_class); | 253 | bus_unregister(&balloon_subsys); |
254 | return error; | 254 | return error; |
255 | } | 255 | } |
256 | 256 | ||
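The balloon hunks replace struct sys_device, SYSDEV_ATTR and sysdev_class with a plain struct device hung off a small bus_type, exposing the same knobs through DEVICE_ATTR show/store handlers. A compact sketch of that conversion pattern with a hypothetical "demo" attribute; only the API shape mirrors the patch.

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/stat.h>

    static unsigned long demo_value;

    static ssize_t show_demo(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
            return sprintf(buf, "%lu\n", demo_value);
    }

    static ssize_t store_demo(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
    {
            if (kstrtoul(buf, 10, &demo_value))
                    return -EINVAL;
            return count;
    }
    static DEVICE_ATTR(demo, S_IRUGO | S_IWUSR, show_demo, store_demo);

    static struct bus_type demo_subsys = {
            .name = "demo",
            .dev_name = "demo",
    };
    static struct device demo_dev;

    static int demo_register(void)
    {
            int error = bus_register(&demo_subsys);

            if (error)
                    return error;

            demo_dev.id = 0;
            demo_dev.bus = &demo_subsys;
            error = device_register(&demo_dev);
            if (error) {
                    bus_unregister(&demo_subsys);
                    return error;
            }
            return device_create_file(&demo_dev, &dev_attr_demo);
    }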
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 8f06e1ed028c..7944a17f5cbf 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
@@ -99,6 +99,7 @@ static void pcistub_device_release(struct kref *kref) | |||
99 | kfree(pci_get_drvdata(psdev->dev)); | 99 | kfree(pci_get_drvdata(psdev->dev)); |
100 | pci_set_drvdata(psdev->dev, NULL); | 100 | pci_set_drvdata(psdev->dev, NULL); |
101 | 101 | ||
102 | psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; | ||
102 | pci_dev_put(psdev->dev); | 103 | pci_dev_put(psdev->dev); |
103 | 104 | ||
104 | kfree(psdev); | 105 | kfree(psdev); |
@@ -234,6 +235,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev) | |||
234 | xen_pcibk_config_free_dyn_fields(found_psdev->dev); | 235 | xen_pcibk_config_free_dyn_fields(found_psdev->dev); |
235 | xen_pcibk_config_reset_dev(found_psdev->dev); | 236 | xen_pcibk_config_reset_dev(found_psdev->dev); |
236 | 237 | ||
238 | xen_unregister_device_domain_owner(found_psdev->dev); | ||
239 | |||
237 | spin_lock_irqsave(&found_psdev->lock, flags); | 240 | spin_lock_irqsave(&found_psdev->lock, flags); |
238 | found_psdev->pdev = NULL; | 241 | found_psdev->pdev = NULL; |
239 | spin_unlock_irqrestore(&found_psdev->lock, flags); | 242 | spin_unlock_irqrestore(&found_psdev->lock, flags); |
@@ -331,6 +334,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) | |||
331 | dev_dbg(&dev->dev, "reset device\n"); | 334 | dev_dbg(&dev->dev, "reset device\n"); |
332 | xen_pcibk_reset_device(dev); | 335 | xen_pcibk_reset_device(dev); |
333 | 336 | ||
337 | dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; | ||
334 | return 0; | 338 | return 0; |
335 | 339 | ||
336 | config_release: | 340 | config_release: |
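pci_stub now owns the PCI_DEV_FLAGS_ASSIGNED lifetime: the flag is set when the stub seizes a device and cleared when the last stub reference is released, rather than at xenbus export/unexport time (compare the xenbus.c hunk below). The pattern in isolation, with hypothetical wrapper names:

    #include <linux/pci.h>

    /* Hypothetical wrappers; only the flag handling mirrors the patch. */
    static void demo_seize(struct pci_dev *dev)
    {
            dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;       /* hide from host-side assignment */
    }

    static void demo_release(struct pci_dev *dev)
    {
            dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
            pci_dev_put(dev);                               /* drop the stub's reference */
    }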
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index 075525945e36..8e1c44d8ab46 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c | |||
@@ -241,11 +241,10 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev, | |||
241 | goto out; | 241 | goto out; |
242 | 242 | ||
243 | dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id); | 243 | dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id); |
244 | dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; | ||
245 | if (xen_register_device_domain_owner(dev, | 244 | if (xen_register_device_domain_owner(dev, |
246 | pdev->xdev->otherend_id) != 0) { | 245 | pdev->xdev->otherend_id) != 0) { |
247 | dev_err(&dev->dev, "device has been assigned to another " \ | 246 | dev_err(&dev->dev, "Stealing ownership from dom%d.\n", |
248 | "domain! Over-writting the ownership, but beware.\n"); | 247 | xen_find_device_domain_owner(dev)); |
249 | xen_unregister_device_domain_owner(dev); | 248 | xen_unregister_device_domain_owner(dev); |
250 | xen_register_device_domain_owner(dev, pdev->xdev->otherend_id); | 249 | xen_register_device_domain_owner(dev, pdev->xdev->otherend_id); |
251 | } | 250 | } |
@@ -281,7 +280,6 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev, | |||
281 | } | 280 | } |
282 | 281 | ||
283 | dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id); | 282 | dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id); |
284 | dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; | ||
285 | xen_unregister_device_domain_owner(dev); | 283 | xen_unregister_device_domain_owner(dev); |
286 | 284 | ||
287 | xen_pcibk_release_pci_dev(pdev, dev); | 285 | xen_pcibk_release_pci_dev(pdev, dev); |
@@ -707,19 +705,16 @@ static int xen_pcibk_xenbus_remove(struct xenbus_device *dev) | |||
707 | return 0; | 705 | return 0; |
708 | } | 706 | } |
709 | 707 | ||
710 | static const struct xenbus_device_id xenpci_ids[] = { | 708 | static const struct xenbus_device_id xen_pcibk_ids[] = { |
711 | {"pci"}, | 709 | {"pci"}, |
712 | {""}, | 710 | {""}, |
713 | }; | 711 | }; |
714 | 712 | ||
715 | static struct xenbus_driver xenbus_xen_pcibk_driver = { | 713 | static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME, |
716 | .name = DRV_NAME, | ||
717 | .owner = THIS_MODULE, | ||
718 | .ids = xenpci_ids, | ||
719 | .probe = xen_pcibk_xenbus_probe, | 714 | .probe = xen_pcibk_xenbus_probe, |
720 | .remove = xen_pcibk_xenbus_remove, | 715 | .remove = xen_pcibk_xenbus_remove, |
721 | .otherend_changed = xen_pcibk_frontend_changed, | 716 | .otherend_changed = xen_pcibk_frontend_changed, |
722 | }; | 717 | ); |
723 | 718 | ||
724 | const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend; | 719 | const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend; |
725 | 720 | ||
@@ -735,11 +730,11 @@ int __init xen_pcibk_xenbus_register(void) | |||
735 | if (passthrough) | 730 | if (passthrough) |
736 | xen_pcibk_backend = &xen_pcibk_passthrough_backend; | 731 | xen_pcibk_backend = &xen_pcibk_passthrough_backend; |
737 | pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name); | 732 | pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name); |
738 | return xenbus_register_backend(&xenbus_xen_pcibk_driver); | 733 | return xenbus_register_backend(&xen_pcibk_driver); |
739 | } | 734 | } |
740 | 735 | ||
741 | void __exit xen_pcibk_xenbus_unregister(void) | 736 | void __exit xen_pcibk_xenbus_unregister(void) |
742 | { | 737 | { |
743 | destroy_workqueue(xen_pcibk_wq); | 738 | destroy_workqueue(xen_pcibk_wq); |
744 | xenbus_unregister_driver(&xenbus_xen_pcibk_driver); | 739 | xenbus_unregister_driver(&xen_pcibk_driver); |
745 | } | 740 | } |
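The open-coded xenbus_driver initializer (name, owner, ids filled in by hand) gives way to DEFINE_XENBUS_DRIVER() plus the single-argument registration helpers changed later in this series. A usage sketch for a made-up backend, assuming, as the hunk above suggests, that the macro expects an ids array named <prefix>_ids and emits a <prefix>_driver:

    #include <linux/init.h>
    #include <xen/xenbus.h>

    /* "demo" is a hypothetical backend; handlers are stubs. */
    static const struct xenbus_device_id demo_ids[] = {
            { "demo" },
            { "" },
    };

    static int demo_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
    {
            return 0;
    }

    static int demo_remove(struct xenbus_device *dev)
    {
            return 0;
    }

    static DEFINE_XENBUS_DRIVER(demo, "demo",
            .probe = demo_probe,
            .remove = demo_remove,
    );

    static int __init demo_init(void)
    {
            return xenbus_register_backend(&demo_driver);
    }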
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index d93c70857e03..767ff656d5a7 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/mman.h> | 74 | #include <linux/mman.h> |
75 | #include <linux/module.h> | 75 | #include <linux/module.h> |
76 | #include <linux/workqueue.h> | 76 | #include <linux/workqueue.h> |
77 | #include <linux/device.h> | ||
77 | #include <xen/balloon.h> | 78 | #include <xen/balloon.h> |
78 | #include <xen/tmem.h> | 79 | #include <xen/tmem.h> |
79 | #include <xen/xen.h> | 80 | #include <xen/xen.h> |
@@ -266,21 +267,20 @@ static void selfballoon_process(struct work_struct *work) | |||
266 | 267 | ||
267 | #ifdef CONFIG_SYSFS | 268 | #ifdef CONFIG_SYSFS |
268 | 269 | ||
269 | #include <linux/sysdev.h> | ||
270 | #include <linux/capability.h> | 270 | #include <linux/capability.h> |
271 | 271 | ||
272 | #define SELFBALLOON_SHOW(name, format, args...) \ | 272 | #define SELFBALLOON_SHOW(name, format, args...) \ |
273 | static ssize_t show_##name(struct sys_device *dev, \ | 273 | static ssize_t show_##name(struct device *dev, \ |
274 | struct sysdev_attribute *attr, \ | 274 | struct device_attribute *attr, \ |
275 | char *buf) \ | 275 | char *buf) \ |
276 | { \ | 276 | { \ |
277 | return sprintf(buf, format, ##args); \ | 277 | return sprintf(buf, format, ##args); \ |
278 | } | 278 | } |
279 | 279 | ||
280 | SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled); | 280 | SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled); |
281 | 281 | ||
282 | static ssize_t store_selfballooning(struct sys_device *dev, | 282 | static ssize_t store_selfballooning(struct device *dev, |
283 | struct sysdev_attribute *attr, | 283 | struct device_attribute *attr, |
284 | const char *buf, | 284 | const char *buf, |
285 | size_t count) | 285 | size_t count) |
286 | { | 286 | { |
@@ -303,13 +303,13 @@ static ssize_t store_selfballooning(struct sys_device *dev, | |||
303 | return count; | 303 | return count; |
304 | } | 304 | } |
305 | 305 | ||
306 | static SYSDEV_ATTR(selfballooning, S_IRUGO | S_IWUSR, | 306 | static DEVICE_ATTR(selfballooning, S_IRUGO | S_IWUSR, |
307 | show_selfballooning, store_selfballooning); | 307 | show_selfballooning, store_selfballooning); |
308 | 308 | ||
309 | SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval); | 309 | SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval); |
310 | 310 | ||
311 | static ssize_t store_selfballoon_interval(struct sys_device *dev, | 311 | static ssize_t store_selfballoon_interval(struct device *dev, |
312 | struct sysdev_attribute *attr, | 312 | struct device_attribute *attr, |
313 | const char *buf, | 313 | const char *buf, |
314 | size_t count) | 314 | size_t count) |
315 | { | 315 | { |
@@ -325,13 +325,13 @@ static ssize_t store_selfballoon_interval(struct sys_device *dev, | |||
325 | return count; | 325 | return count; |
326 | } | 326 | } |
327 | 327 | ||
328 | static SYSDEV_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR, | 328 | static DEVICE_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR, |
329 | show_selfballoon_interval, store_selfballoon_interval); | 329 | show_selfballoon_interval, store_selfballoon_interval); |
330 | 330 | ||
331 | SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis); | 331 | SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis); |
332 | 332 | ||
333 | static ssize_t store_selfballoon_downhys(struct sys_device *dev, | 333 | static ssize_t store_selfballoon_downhys(struct device *dev, |
334 | struct sysdev_attribute *attr, | 334 | struct device_attribute *attr, |
335 | const char *buf, | 335 | const char *buf, |
336 | size_t count) | 336 | size_t count) |
337 | { | 337 | { |
@@ -347,14 +347,14 @@ static ssize_t store_selfballoon_downhys(struct sys_device *dev, | |||
347 | return count; | 347 | return count; |
348 | } | 348 | } |
349 | 349 | ||
350 | static SYSDEV_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR, | 350 | static DEVICE_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR, |
351 | show_selfballoon_downhys, store_selfballoon_downhys); | 351 | show_selfballoon_downhys, store_selfballoon_downhys); |
352 | 352 | ||
353 | 353 | ||
354 | SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis); | 354 | SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis); |
355 | 355 | ||
356 | static ssize_t store_selfballoon_uphys(struct sys_device *dev, | 356 | static ssize_t store_selfballoon_uphys(struct device *dev, |
357 | struct sysdev_attribute *attr, | 357 | struct device_attribute *attr, |
358 | const char *buf, | 358 | const char *buf, |
359 | size_t count) | 359 | size_t count) |
360 | { | 360 | { |
@@ -370,14 +370,14 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev, | |||
370 | return count; | 370 | return count; |
371 | } | 371 | } |
372 | 372 | ||
373 | static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR, | 373 | static DEVICE_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR, |
374 | show_selfballoon_uphys, store_selfballoon_uphys); | 374 | show_selfballoon_uphys, store_selfballoon_uphys); |
375 | 375 | ||
376 | SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n", | 376 | SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n", |
377 | selfballoon_min_usable_mb); | 377 | selfballoon_min_usable_mb); |
378 | 378 | ||
379 | static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev, | 379 | static ssize_t store_selfballoon_min_usable_mb(struct device *dev, |
380 | struct sysdev_attribute *attr, | 380 | struct device_attribute *attr, |
381 | const char *buf, | 381 | const char *buf, |
382 | size_t count) | 382 | size_t count) |
383 | { | 383 | { |
@@ -393,7 +393,7 @@ static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev, | |||
393 | return count; | 393 | return count; |
394 | } | 394 | } |
395 | 395 | ||
396 | static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR, | 396 | static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR, |
397 | show_selfballoon_min_usable_mb, | 397 | show_selfballoon_min_usable_mb, |
398 | store_selfballoon_min_usable_mb); | 398 | store_selfballoon_min_usable_mb); |
399 | 399 | ||
@@ -401,8 +401,8 @@ static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR, | |||
401 | #ifdef CONFIG_FRONTSWAP | 401 | #ifdef CONFIG_FRONTSWAP |
402 | SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking); | 402 | SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking); |
403 | 403 | ||
404 | static ssize_t store_frontswap_selfshrinking(struct sys_device *dev, | 404 | static ssize_t store_frontswap_selfshrinking(struct device *dev, |
405 | struct sysdev_attribute *attr, | 405 | struct device_attribute *attr, |
406 | const char *buf, | 406 | const char *buf, |
407 | size_t count) | 407 | size_t count) |
408 | { | 408 | { |
@@ -424,13 +424,13 @@ static ssize_t store_frontswap_selfshrinking(struct sys_device *dev, | |||
424 | return count; | 424 | return count; |
425 | } | 425 | } |
426 | 426 | ||
427 | static SYSDEV_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR, | 427 | static DEVICE_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR, |
428 | show_frontswap_selfshrinking, store_frontswap_selfshrinking); | 428 | show_frontswap_selfshrinking, store_frontswap_selfshrinking); |
429 | 429 | ||
430 | SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia); | 430 | SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia); |
431 | 431 | ||
432 | static ssize_t store_frontswap_inertia(struct sys_device *dev, | 432 | static ssize_t store_frontswap_inertia(struct device *dev, |
433 | struct sysdev_attribute *attr, | 433 | struct device_attribute *attr, |
434 | const char *buf, | 434 | const char *buf, |
435 | size_t count) | 435 | size_t count) |
436 | { | 436 | { |
@@ -447,13 +447,13 @@ static ssize_t store_frontswap_inertia(struct sys_device *dev, | |||
447 | return count; | 447 | return count; |
448 | } | 448 | } |
449 | 449 | ||
450 | static SYSDEV_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR, | 450 | static DEVICE_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR, |
451 | show_frontswap_inertia, store_frontswap_inertia); | 451 | show_frontswap_inertia, store_frontswap_inertia); |
452 | 452 | ||
453 | SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis); | 453 | SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis); |
454 | 454 | ||
455 | static ssize_t store_frontswap_hysteresis(struct sys_device *dev, | 455 | static ssize_t store_frontswap_hysteresis(struct device *dev, |
456 | struct sysdev_attribute *attr, | 456 | struct device_attribute *attr, |
457 | const char *buf, | 457 | const char *buf, |
458 | size_t count) | 458 | size_t count) |
459 | { | 459 | { |
@@ -469,21 +469,21 @@ static ssize_t store_frontswap_hysteresis(struct sys_device *dev, | |||
469 | return count; | 469 | return count; |
470 | } | 470 | } |
471 | 471 | ||
472 | static SYSDEV_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR, | 472 | static DEVICE_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR, |
473 | show_frontswap_hysteresis, store_frontswap_hysteresis); | 473 | show_frontswap_hysteresis, store_frontswap_hysteresis); |
474 | 474 | ||
475 | #endif /* CONFIG_FRONTSWAP */ | 475 | #endif /* CONFIG_FRONTSWAP */ |
476 | 476 | ||
477 | static struct attribute *selfballoon_attrs[] = { | 477 | static struct attribute *selfballoon_attrs[] = { |
478 | &attr_selfballooning.attr, | 478 | &dev_attr_selfballooning.attr, |
479 | &attr_selfballoon_interval.attr, | 479 | &dev_attr_selfballoon_interval.attr, |
480 | &attr_selfballoon_downhysteresis.attr, | 480 | &dev_attr_selfballoon_downhysteresis.attr, |
481 | &attr_selfballoon_uphysteresis.attr, | 481 | &dev_attr_selfballoon_uphysteresis.attr, |
482 | &attr_selfballoon_min_usable_mb.attr, | 482 | &dev_attr_selfballoon_min_usable_mb.attr, |
483 | #ifdef CONFIG_FRONTSWAP | 483 | #ifdef CONFIG_FRONTSWAP |
484 | &attr_frontswap_selfshrinking.attr, | 484 | &dev_attr_frontswap_selfshrinking.attr, |
485 | &attr_frontswap_hysteresis.attr, | 485 | &dev_attr_frontswap_hysteresis.attr, |
486 | &attr_frontswap_inertia.attr, | 486 | &dev_attr_frontswap_inertia.attr, |
487 | #endif | 487 | #endif |
488 | NULL | 488 | NULL |
489 | }; | 489 | }; |
@@ -494,12 +494,12 @@ static struct attribute_group selfballoon_group = { | |||
494 | }; | 494 | }; |
495 | #endif | 495 | #endif |
496 | 496 | ||
497 | int register_xen_selfballooning(struct sys_device *sysdev) | 497 | int register_xen_selfballooning(struct device *dev) |
498 | { | 498 | { |
499 | int error = -1; | 499 | int error = -1; |
500 | 500 | ||
501 | #ifdef CONFIG_SYSFS | 501 | #ifdef CONFIG_SYSFS |
502 | error = sysfs_create_group(&sysdev->kobj, &selfballoon_group); | 502 | error = sysfs_create_group(&dev->kobj, &selfballoon_group); |
503 | #endif | 503 | #endif |
504 | return error; | 504 | return error; |
505 | } | 505 | } |
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile index 8dca685358b4..31e2e9050c7a 100644 --- a/drivers/xen/xenbus/Makefile +++ b/drivers/xen/xenbus/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | obj-y += xenbus.o | 1 | obj-y += xenbus.o |
2 | obj-y += xenbus_dev_frontend.o | ||
2 | 3 | ||
3 | xenbus-objs = | 4 | xenbus-objs = |
4 | xenbus-objs += xenbus_client.o | 5 | xenbus-objs += xenbus_client.o |
@@ -9,4 +10,5 @@ xenbus-objs += xenbus_probe.o | |||
9 | xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o | 10 | xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o |
10 | xenbus-objs += $(xenbus-be-objs-y) | 11 | xenbus-objs += $(xenbus-be-objs-y) |
11 | 12 | ||
13 | obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o | ||
12 | obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o | 14 | obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o |
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 1906125eab49..566d2adbd6ea 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c | |||
@@ -32,15 +32,39 @@ | |||
32 | 32 | ||
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
35 | #include <linux/spinlock.h> | ||
35 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
36 | #include <linux/export.h> | 37 | #include <linux/export.h> |
37 | #include <asm/xen/hypervisor.h> | 38 | #include <asm/xen/hypervisor.h> |
38 | #include <asm/xen/page.h> | 39 | #include <asm/xen/page.h> |
39 | #include <xen/interface/xen.h> | 40 | #include <xen/interface/xen.h> |
40 | #include <xen/interface/event_channel.h> | 41 | #include <xen/interface/event_channel.h> |
42 | #include <xen/balloon.h> | ||
41 | #include <xen/events.h> | 43 | #include <xen/events.h> |
42 | #include <xen/grant_table.h> | 44 | #include <xen/grant_table.h> |
43 | #include <xen/xenbus.h> | 45 | #include <xen/xenbus.h> |
46 | #include <xen/xen.h> | ||
47 | |||
48 | #include "xenbus_probe.h" | ||
49 | |||
50 | struct xenbus_map_node { | ||
51 | struct list_head next; | ||
52 | union { | ||
53 | struct vm_struct *area; /* PV */ | ||
54 | struct page *page; /* HVM */ | ||
55 | }; | ||
56 | grant_handle_t handle; | ||
57 | }; | ||
58 | |||
59 | static DEFINE_SPINLOCK(xenbus_valloc_lock); | ||
60 | static LIST_HEAD(xenbus_valloc_pages); | ||
61 | |||
62 | struct xenbus_ring_ops { | ||
63 | int (*map)(struct xenbus_device *dev, int gnt, void **vaddr); | ||
64 | int (*unmap)(struct xenbus_device *dev, void *vaddr); | ||
65 | }; | ||
66 | |||
67 | static const struct xenbus_ring_ops *ring_ops __read_mostly; | ||
44 | 68 | ||
45 | const char *xenbus_strstate(enum xenbus_state state) | 69 | const char *xenbus_strstate(enum xenbus_state state) |
46 | { | 70 | { |
@@ -436,19 +460,33 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn); | |||
436 | */ | 460 | */ |
437 | int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) | 461 | int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) |
438 | { | 462 | { |
463 | return ring_ops->map(dev, gnt_ref, vaddr); | ||
464 | } | ||
465 | EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); | ||
466 | |||
467 | static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, | ||
468 | int gnt_ref, void **vaddr) | ||
469 | { | ||
439 | struct gnttab_map_grant_ref op = { | 470 | struct gnttab_map_grant_ref op = { |
440 | .flags = GNTMAP_host_map | GNTMAP_contains_pte, | 471 | .flags = GNTMAP_host_map | GNTMAP_contains_pte, |
441 | .ref = gnt_ref, | 472 | .ref = gnt_ref, |
442 | .dom = dev->otherend_id, | 473 | .dom = dev->otherend_id, |
443 | }; | 474 | }; |
475 | struct xenbus_map_node *node; | ||
444 | struct vm_struct *area; | 476 | struct vm_struct *area; |
445 | pte_t *pte; | 477 | pte_t *pte; |
446 | 478 | ||
447 | *vaddr = NULL; | 479 | *vaddr = NULL; |
448 | 480 | ||
481 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
482 | if (!node) | ||
483 | return -ENOMEM; | ||
484 | |||
449 | area = alloc_vm_area(PAGE_SIZE, &pte); | 485 | area = alloc_vm_area(PAGE_SIZE, &pte); |
450 | if (!area) | 486 | if (!area) { |
487 | kfree(node); | ||
451 | return -ENOMEM; | 488 | return -ENOMEM; |
489 | } | ||
452 | 490 | ||
453 | op.host_addr = arbitrary_virt_to_machine(pte).maddr; | 491 | op.host_addr = arbitrary_virt_to_machine(pte).maddr; |
454 | 492 | ||
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) | |||
457 | 495 | ||
458 | if (op.status != GNTST_okay) { | 496 | if (op.status != GNTST_okay) { |
459 | free_vm_area(area); | 497 | free_vm_area(area); |
498 | kfree(node); | ||
460 | xenbus_dev_fatal(dev, op.status, | 499 | xenbus_dev_fatal(dev, op.status, |
461 | "mapping in shared page %d from domain %d", | 500 | "mapping in shared page %d from domain %d", |
462 | gnt_ref, dev->otherend_id); | 501 | gnt_ref, dev->otherend_id); |
463 | return op.status; | 502 | return op.status; |
464 | } | 503 | } |
465 | 504 | ||
466 | /* Stuff the handle in an unused field */ | 505 | node->handle = op.handle; |
467 | area->phys_addr = (unsigned long)op.handle; | 506 | node->area = area; |
507 | |||
508 | spin_lock(&xenbus_valloc_lock); | ||
509 | list_add(&node->next, &xenbus_valloc_pages); | ||
510 | spin_unlock(&xenbus_valloc_lock); | ||
468 | 511 | ||
469 | *vaddr = area->addr; | 512 | *vaddr = area->addr; |
470 | return 0; | 513 | return 0; |
471 | } | 514 | } |
472 | EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); | 515 | |
516 | static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, | ||
517 | int gnt_ref, void **vaddr) | ||
518 | { | ||
519 | struct xenbus_map_node *node; | ||
520 | int err; | ||
521 | void *addr; | ||
522 | |||
523 | *vaddr = NULL; | ||
524 | |||
525 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
526 | if (!node) | ||
527 | return -ENOMEM; | ||
528 | |||
529 | err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */); | ||
530 | if (err) | ||
531 | goto out_err; | ||
532 | |||
533 | addr = pfn_to_kaddr(page_to_pfn(node->page)); | ||
534 | |||
535 | err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr); | ||
536 | if (err) | ||
537 | goto out_err; | ||
538 | |||
539 | spin_lock(&xenbus_valloc_lock); | ||
540 | list_add(&node->next, &xenbus_valloc_pages); | ||
541 | spin_unlock(&xenbus_valloc_lock); | ||
542 | |||
543 | *vaddr = addr; | ||
544 | return 0; | ||
545 | |||
546 | out_err: | ||
547 | free_xenballooned_pages(1, &node->page); | ||
548 | kfree(node); | ||
549 | return err; | ||
550 | } | ||
473 | 551 | ||
474 | 552 | ||
475 | /** | 553 | /** |
@@ -489,12 +567,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); | |||
489 | int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, | 567 | int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, |
490 | grant_handle_t *handle, void *vaddr) | 568 | grant_handle_t *handle, void *vaddr) |
491 | { | 569 | { |
492 | struct gnttab_map_grant_ref op = { | 570 | struct gnttab_map_grant_ref op; |
493 | .host_addr = (unsigned long)vaddr, | 571 | |
494 | .flags = GNTMAP_host_map, | 572 | gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref, |
495 | .ref = gnt_ref, | 573 | dev->otherend_id); |
496 | .dom = dev->otherend_id, | ||
497 | }; | ||
498 | 574 | ||
499 | if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) | 575 | if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) |
500 | BUG(); | 576 | BUG(); |
@@ -525,32 +601,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring); | |||
525 | */ | 601 | */ |
526 | int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) | 602 | int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) |
527 | { | 603 | { |
528 | struct vm_struct *area; | 604 | return ring_ops->unmap(dev, vaddr); |
605 | } | ||
606 | EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); | ||
607 | |||
608 | static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) | ||
609 | { | ||
610 | struct xenbus_map_node *node; | ||
529 | struct gnttab_unmap_grant_ref op = { | 611 | struct gnttab_unmap_grant_ref op = { |
530 | .host_addr = (unsigned long)vaddr, | 612 | .host_addr = (unsigned long)vaddr, |
531 | }; | 613 | }; |
532 | unsigned int level; | 614 | unsigned int level; |
533 | 615 | ||
534 | /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) | 616 | spin_lock(&xenbus_valloc_lock); |
535 | * method so that we don't have to muck with vmalloc internals here. | 617 | list_for_each_entry(node, &xenbus_valloc_pages, next) { |
536 | * We could force the user to hang on to their struct vm_struct from | 618 | if (node->area->addr == vaddr) { |
537 | * xenbus_map_ring_valloc, but these 6 lines considerably simplify | 619 | list_del(&node->next); |
538 | * this API. | 620 | goto found; |
539 | */ | 621 | } |
540 | read_lock(&vmlist_lock); | ||
541 | for (area = vmlist; area != NULL; area = area->next) { | ||
542 | if (area->addr == vaddr) | ||
543 | break; | ||
544 | } | 622 | } |
545 | read_unlock(&vmlist_lock); | 623 | node = NULL; |
624 | found: | ||
625 | spin_unlock(&xenbus_valloc_lock); | ||
546 | 626 | ||
547 | if (!area) { | 627 | if (!node) { |
548 | xenbus_dev_error(dev, -ENOENT, | 628 | xenbus_dev_error(dev, -ENOENT, |
549 | "can't find mapped virtual address %p", vaddr); | 629 | "can't find mapped virtual address %p", vaddr); |
550 | return GNTST_bad_virt_addr; | 630 | return GNTST_bad_virt_addr; |
551 | } | 631 | } |
552 | 632 | ||
553 | op.handle = (grant_handle_t)area->phys_addr; | 633 | op.handle = node->handle; |
554 | op.host_addr = arbitrary_virt_to_machine( | 634 | op.host_addr = arbitrary_virt_to_machine( |
555 | lookup_address((unsigned long)vaddr, &level)).maddr; | 635 | lookup_address((unsigned long)vaddr, &level)).maddr; |
556 | 636 | ||
@@ -558,16 +638,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) | |||
558 | BUG(); | 638 | BUG(); |
559 | 639 | ||
560 | if (op.status == GNTST_okay) | 640 | if (op.status == GNTST_okay) |
561 | free_vm_area(area); | 641 | free_vm_area(node->area); |
562 | else | 642 | else |
563 | xenbus_dev_error(dev, op.status, | 643 | xenbus_dev_error(dev, op.status, |
564 | "unmapping page at handle %d error %d", | 644 | "unmapping page at handle %d error %d", |
565 | (int16_t)area->phys_addr, op.status); | 645 | node->handle, op.status); |
566 | 646 | ||
647 | kfree(node); | ||
567 | return op.status; | 648 | return op.status; |
568 | } | 649 | } |
569 | EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); | ||
570 | 650 | ||
651 | static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) | ||
652 | { | ||
653 | int rv; | ||
654 | struct xenbus_map_node *node; | ||
655 | void *addr; | ||
656 | |||
657 | spin_lock(&xenbus_valloc_lock); | ||
658 | list_for_each_entry(node, &xenbus_valloc_pages, next) { | ||
659 | addr = pfn_to_kaddr(page_to_pfn(node->page)); | ||
660 | if (addr == vaddr) { | ||
661 | list_del(&node->next); | ||
662 | goto found; | ||
663 | } | ||
664 | } | ||
665 | node = NULL; | ||
666 | found: | ||
667 | spin_unlock(&xenbus_valloc_lock); | ||
668 | |||
669 | if (!node) { | ||
670 | xenbus_dev_error(dev, -ENOENT, | ||
671 | "can't find mapped virtual address %p", vaddr); | ||
672 | return GNTST_bad_virt_addr; | ||
673 | } | ||
674 | |||
675 | rv = xenbus_unmap_ring(dev, node->handle, addr); | ||
676 | |||
677 | if (!rv) | ||
678 | free_xenballooned_pages(1, &node->page); | ||
679 | else | ||
680 | WARN(1, "Leaking %p\n", vaddr); | ||
681 | |||
682 | kfree(node); | ||
683 | return rv; | ||
684 | } | ||
571 | 685 | ||
572 | /** | 686 | /** |
573 | * xenbus_unmap_ring | 687 | * xenbus_unmap_ring |
@@ -582,10 +696,9 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); | |||
582 | int xenbus_unmap_ring(struct xenbus_device *dev, | 696 | int xenbus_unmap_ring(struct xenbus_device *dev, |
583 | grant_handle_t handle, void *vaddr) | 697 | grant_handle_t handle, void *vaddr) |
584 | { | 698 | { |
585 | struct gnttab_unmap_grant_ref op = { | 699 | struct gnttab_unmap_grant_ref op; |
586 | .host_addr = (unsigned long)vaddr, | 700 | |
587 | .handle = handle, | 701 | gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle); |
588 | }; | ||
589 | 702 | ||
590 | if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) | 703 | if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) |
591 | BUG(); | 704 | BUG(); |
@@ -617,3 +730,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path) | |||
617 | return result; | 730 | return result; |
618 | } | 731 | } |
619 | EXPORT_SYMBOL_GPL(xenbus_read_driver_state); | 732 | EXPORT_SYMBOL_GPL(xenbus_read_driver_state); |
733 | |||
734 | static const struct xenbus_ring_ops ring_ops_pv = { | ||
735 | .map = xenbus_map_ring_valloc_pv, | ||
736 | .unmap = xenbus_unmap_ring_vfree_pv, | ||
737 | }; | ||
738 | |||
739 | static const struct xenbus_ring_ops ring_ops_hvm = { | ||
740 | .map = xenbus_map_ring_valloc_hvm, | ||
741 | .unmap = xenbus_unmap_ring_vfree_hvm, | ||
742 | }; | ||
743 | |||
744 | void __init xenbus_ring_ops_init(void) | ||
745 | { | ||
746 | if (xen_pv_domain()) | ||
747 | ring_ops = &ring_ops_pv; | ||
748 | else | ||
749 | ring_ops = &ring_ops_hvm; | ||
750 | } | ||
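xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree() are now thin wrappers over a ring_ops table chosen once in xenbus_ring_ops_init() (PV maps grants into an alloc_vm_area() region, HVM into a ballooned page), and every mapping is recorded on xenbus_valloc_pages so the unmap side can recover its grant handle from the virtual address alone. A stripped-down sketch of that dispatch-plus-bookkeeping shape, with invented names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_node {
            struct list_head next;
            void *vaddr;
            int handle;
    };

    struct demo_ring_ops {
            int (*map)(int gref, void **vaddr);
            int (*unmap)(void *vaddr);
    };

    static const struct demo_ring_ops *demo_ops;    /* picked once at init, PV or HVM */
    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    /* The map side allocates a demo_node and records it here. */
    static void demo_track(struct demo_node *node)
    {
            spin_lock(&demo_lock);
            list_add(&node->next, &demo_list);
            spin_unlock(&demo_lock);
    }

    /* The unmap side looks the node up again by virtual address. */
    static struct demo_node *demo_untrack(void *vaddr)
    {
            struct demo_node *node, *found = NULL;

            spin_lock(&demo_lock);
            list_for_each_entry(node, &demo_list, next) {
                    if (node->vaddr == vaddr) {
                            list_del(&node->next);
                            found = node;
                            break;
                    }
            }
            spin_unlock(&demo_lock);
            return found;   /* NULL means the caller never mapped vaddr */
    }

    int demo_map_ring(int gref, void **vaddr)
    {
            return demo_ops->map(gref, vaddr);
    }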
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h index c21db7513736..6e42800fa499 100644 --- a/drivers/xen/xenbus/xenbus_comms.h +++ b/drivers/xen/xenbus/xenbus_comms.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #ifndef _XENBUS_COMMS_H | 31 | #ifndef _XENBUS_COMMS_H |
32 | #define _XENBUS_COMMS_H | 32 | #define _XENBUS_COMMS_H |
33 | 33 | ||
34 | #include <linux/fs.h> | ||
35 | |||
34 | int xs_init(void); | 36 | int xs_init(void); |
35 | int xb_init_comms(void); | 37 | int xb_init_comms(void); |
36 | 38 | ||
@@ -43,4 +45,6 @@ int xs_input_avail(void); | |||
43 | extern struct xenstore_domain_interface *xen_store_interface; | 45 | extern struct xenstore_domain_interface *xen_store_interface; |
44 | extern int xen_store_evtchn; | 46 | extern int xen_store_evtchn; |
45 | 47 | ||
48 | extern const struct file_operations xen_xenbus_fops; | ||
49 | |||
46 | #endif /* _XENBUS_COMMS_H */ | 50 | #endif /* _XENBUS_COMMS_H */ |
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c new file mode 100644 index 000000000000..3d3be78c1093 --- /dev/null +++ b/drivers/xen/xenbus/xenbus_dev_backend.c | |||
@@ -0,0 +1,90 @@ | |||
1 | #include <linux/slab.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/mm.h> | ||
4 | #include <linux/fs.h> | ||
5 | #include <linux/miscdevice.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/capability.h> | ||
8 | |||
9 | #include <xen/xen.h> | ||
10 | #include <xen/page.h> | ||
11 | #include <xen/xenbus_dev.h> | ||
12 | |||
13 | #include "xenbus_comms.h" | ||
14 | |||
15 | MODULE_LICENSE("GPL"); | ||
16 | |||
17 | static int xenbus_backend_open(struct inode *inode, struct file *filp) | ||
18 | { | ||
19 | if (!capable(CAP_SYS_ADMIN)) | ||
20 | return -EPERM; | ||
21 | |||
22 | return nonseekable_open(inode, filp); | ||
23 | } | ||
24 | |||
25 | static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) | ||
26 | { | ||
27 | if (!capable(CAP_SYS_ADMIN)) | ||
28 | return -EPERM; | ||
29 | |||
30 | switch (cmd) { | ||
31 | case IOCTL_XENBUS_BACKEND_EVTCHN: | ||
32 | if (xen_store_evtchn > 0) | ||
33 | return xen_store_evtchn; | ||
34 | return -ENODEV; | ||
35 | |||
36 | default: | ||
37 | return -ENOTTY; | ||
38 | } | ||
39 | } | ||
40 | |||
41 | static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) | ||
42 | { | ||
43 | size_t size = vma->vm_end - vma->vm_start; | ||
44 | |||
45 | if (!capable(CAP_SYS_ADMIN)) | ||
46 | return -EPERM; | ||
47 | |||
48 | if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) | ||
49 | return -EINVAL; | ||
50 | |||
51 | if (remap_pfn_range(vma, vma->vm_start, | ||
52 | virt_to_pfn(xen_store_interface), | ||
53 | size, vma->vm_page_prot)) | ||
54 | return -EAGAIN; | ||
55 | |||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | const struct file_operations xenbus_backend_fops = { | ||
60 | .open = xenbus_backend_open, | ||
61 | .mmap = xenbus_backend_mmap, | ||
62 | .unlocked_ioctl = xenbus_backend_ioctl, | ||
63 | }; | ||
64 | |||
65 | static struct miscdevice xenbus_backend_dev = { | ||
66 | .minor = MISC_DYNAMIC_MINOR, | ||
67 | .name = "xen/xenbus_backend", | ||
68 | .fops = &xenbus_backend_fops, | ||
69 | }; | ||
70 | |||
71 | static int __init xenbus_backend_init(void) | ||
72 | { | ||
73 | int err; | ||
74 | |||
75 | if (!xen_initial_domain()) | ||
76 | return -ENODEV; | ||
77 | |||
78 | err = misc_register(&xenbus_backend_dev); | ||
79 | if (err) | ||
80 | printk(KERN_ERR "Could not register xenbus backend device\n"); | ||
81 | return err; | ||
82 | } | ||
83 | |||
84 | static void __exit xenbus_backend_exit(void) | ||
85 | { | ||
86 | misc_deregister(&xenbus_backend_dev); | ||
87 | } | ||
88 | |||
89 | module_init(xenbus_backend_init); | ||
90 | module_exit(xenbus_backend_exit); | ||
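The new node gives a privileged userspace xenstore daemon two things: the store event channel via IOCTL_XENBUS_BACKEND_EVTCHN and the shared interface page via mmap(). A hedged userspace sketch, assuming the exported header is reachable as <xen/xenbus_dev.h> and error handling is trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <xen/xenbus_dev.h>     /* IOCTL_XENBUS_BACKEND_EVTCHN */

    int main(void)
    {
            int fd = open("/dev/xen/xenbus_backend", O_RDWR);
            long page = sysconf(_SC_PAGESIZE);
            int evtchn;
            void *ring;

            if (fd < 0)
                    return 1;

            evtchn = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
            ring = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (ring == MAP_FAILED)
                    return 1;

            printf("store evtchn %d, interface mapped at %p\n", evtchn, ring);
            munmap(ring, page);
            close(fd);
            return 0;
    }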
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index bbd000f88af7..527dc2a3b89f 100644 --- a/drivers/xen/xenfs/xenbus.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -52,13 +52,17 @@ | |||
52 | #include <linux/namei.h> | 52 | #include <linux/namei.h> |
53 | #include <linux/string.h> | 53 | #include <linux/string.h> |
54 | #include <linux/slab.h> | 54 | #include <linux/slab.h> |
55 | #include <linux/miscdevice.h> | ||
56 | #include <linux/module.h> | ||
55 | 57 | ||
56 | #include "xenfs.h" | 58 | #include "xenbus_comms.h" |
57 | #include "../xenbus/xenbus_comms.h" | ||
58 | 59 | ||
59 | #include <xen/xenbus.h> | 60 | #include <xen/xenbus.h> |
61 | #include <xen/xen.h> | ||
60 | #include <asm/xen/hypervisor.h> | 62 | #include <asm/xen/hypervisor.h> |
61 | 63 | ||
64 | MODULE_LICENSE("GPL"); | ||
65 | |||
62 | /* | 66 | /* |
63 | * An element of a list of outstanding transactions, for which we're | 67 | * An element of a list of outstanding transactions, for which we're |
64 | * still waiting a reply. | 68 | * still waiting a reply. |
@@ -101,7 +105,7 @@ struct xenbus_file_priv { | |||
101 | unsigned int len; | 105 | unsigned int len; |
102 | union { | 106 | union { |
103 | struct xsd_sockmsg msg; | 107 | struct xsd_sockmsg msg; |
104 | char buffer[PAGE_SIZE]; | 108 | char buffer[XENSTORE_PAYLOAD_MAX]; |
105 | } u; | 109 | } u; |
106 | 110 | ||
107 | /* Response queue. */ | 111 | /* Response queue. */ |
@@ -583,7 +587,7 @@ static unsigned int xenbus_file_poll(struct file *file, poll_table *wait) | |||
583 | return 0; | 587 | return 0; |
584 | } | 588 | } |
585 | 589 | ||
586 | const struct file_operations xenbus_file_ops = { | 590 | const struct file_operations xen_xenbus_fops = { |
587 | .read = xenbus_file_read, | 591 | .read = xenbus_file_read, |
588 | .write = xenbus_file_write, | 592 | .write = xenbus_file_write, |
589 | .open = xenbus_file_open, | 593 | .open = xenbus_file_open, |
@@ -591,3 +595,31 @@ const struct file_operations xenbus_file_ops = { | |||
591 | .poll = xenbus_file_poll, | 595 | .poll = xenbus_file_poll, |
592 | .llseek = no_llseek, | 596 | .llseek = no_llseek, |
593 | }; | 597 | }; |
598 | EXPORT_SYMBOL_GPL(xen_xenbus_fops); | ||
599 | |||
600 | static struct miscdevice xenbus_dev = { | ||
601 | .minor = MISC_DYNAMIC_MINOR, | ||
602 | .name = "xen/xenbus", | ||
603 | .fops = &xen_xenbus_fops, | ||
604 | }; | ||
605 | |||
606 | static int __init xenbus_init(void) | ||
607 | { | ||
608 | int err; | ||
609 | |||
610 | if (!xen_domain()) | ||
611 | return -ENODEV; | ||
612 | |||
613 | err = misc_register(&xenbus_dev); | ||
614 | if (err) | ||
615 | printk(KERN_ERR "Could not register xenbus frontend device\n"); | ||
616 | return err; | ||
617 | } | ||
618 | |||
619 | static void __exit xenbus_exit(void) | ||
620 | { | ||
621 | misc_deregister(&xenbus_dev); | ||
622 | } | ||
623 | |||
624 | module_init(xenbus_init); | ||
625 | module_exit(xenbus_exit); | ||
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 1b178c6e8937..3864967202b5 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -291,14 +291,9 @@ void xenbus_dev_shutdown(struct device *_dev) | |||
291 | EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); | 291 | EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); |
292 | 292 | ||
293 | int xenbus_register_driver_common(struct xenbus_driver *drv, | 293 | int xenbus_register_driver_common(struct xenbus_driver *drv, |
294 | struct xen_bus_type *bus, | 294 | struct xen_bus_type *bus) |
295 | struct module *owner, | ||
296 | const char *mod_name) | ||
297 | { | 295 | { |
298 | drv->driver.name = drv->name; | ||
299 | drv->driver.bus = &bus->bus; | 296 | drv->driver.bus = &bus->bus; |
300 | drv->driver.owner = owner; | ||
301 | drv->driver.mod_name = mod_name; | ||
302 | 297 | ||
303 | return driver_register(&drv->driver); | 298 | return driver_register(&drv->driver); |
304 | } | 299 | } |
@@ -730,6 +725,8 @@ static int __init xenbus_init(void) | |||
730 | if (!xen_domain()) | 725 | if (!xen_domain()) |
731 | return -ENODEV; | 726 | return -ENODEV; |
732 | 727 | ||
728 | xenbus_ring_ops_init(); | ||
729 | |||
733 | if (xen_hvm_domain()) { | 730 | if (xen_hvm_domain()) { |
734 | uint64_t v = 0; | 731 | uint64_t v = 0; |
735 | err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); | 732 | err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); |
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h index 9b1de4e34c64..bb4f92ed8730 100644 --- a/drivers/xen/xenbus/xenbus_probe.h +++ b/drivers/xen/xenbus/xenbus_probe.h | |||
@@ -53,9 +53,7 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv); | |||
53 | extern int xenbus_dev_probe(struct device *_dev); | 53 | extern int xenbus_dev_probe(struct device *_dev); |
54 | extern int xenbus_dev_remove(struct device *_dev); | 54 | extern int xenbus_dev_remove(struct device *_dev); |
55 | extern int xenbus_register_driver_common(struct xenbus_driver *drv, | 55 | extern int xenbus_register_driver_common(struct xenbus_driver *drv, |
56 | struct xen_bus_type *bus, | 56 | struct xen_bus_type *bus); |
57 | struct module *owner, | ||
58 | const char *mod_name); | ||
59 | extern int xenbus_probe_node(struct xen_bus_type *bus, | 57 | extern int xenbus_probe_node(struct xen_bus_type *bus, |
60 | const char *type, | 58 | const char *type, |
61 | const char *nodename); | 59 | const char *nodename); |
@@ -76,4 +74,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch, | |||
76 | extern int xenbus_read_otherend_details(struct xenbus_device *xendev, | 74 | extern int xenbus_read_otherend_details(struct xenbus_device *xendev, |
77 | char *id_node, char *path_node); | 75 | char *id_node, char *path_node); |
78 | 76 | ||
77 | void xenbus_ring_ops_init(void); | ||
78 | |||
79 | #endif | 79 | #endif |
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index c3c7cd195c11..257be37d9091 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c | |||
@@ -232,15 +232,13 @@ int xenbus_dev_is_online(struct xenbus_device *dev) | |||
232 | } | 232 | } |
233 | EXPORT_SYMBOL_GPL(xenbus_dev_is_online); | 233 | EXPORT_SYMBOL_GPL(xenbus_dev_is_online); |
234 | 234 | ||
235 | int __xenbus_register_backend(struct xenbus_driver *drv, | 235 | int xenbus_register_backend(struct xenbus_driver *drv) |
236 | struct module *owner, const char *mod_name) | ||
237 | { | 236 | { |
238 | drv->read_otherend_details = read_frontend_details; | 237 | drv->read_otherend_details = read_frontend_details; |
239 | 238 | ||
240 | return xenbus_register_driver_common(drv, &xenbus_backend, | 239 | return xenbus_register_driver_common(drv, &xenbus_backend); |
241 | owner, mod_name); | ||
242 | } | 240 | } |
243 | EXPORT_SYMBOL_GPL(__xenbus_register_backend); | 241 | EXPORT_SYMBOL_GPL(xenbus_register_backend); |
244 | 242 | ||
245 | static int backend_probe_and_watch(struct notifier_block *notifier, | 243 | static int backend_probe_and_watch(struct notifier_block *notifier, |
246 | unsigned long event, | 244 | unsigned long event, |
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 2f73195512b4..9c57819df51a 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c | |||
@@ -230,15 +230,13 @@ static void wait_for_devices(struct xenbus_driver *xendrv) | |||
230 | print_device_status); | 230 | print_device_status); |
231 | } | 231 | } |
232 | 232 | ||
233 | int __xenbus_register_frontend(struct xenbus_driver *drv, | 233 | int xenbus_register_frontend(struct xenbus_driver *drv) |
234 | struct module *owner, const char *mod_name) | ||
235 | { | 234 | { |
236 | int ret; | 235 | int ret; |
237 | 236 | ||
238 | drv->read_otherend_details = read_backend_details; | 237 | drv->read_otherend_details = read_backend_details; |
239 | 238 | ||
240 | ret = xenbus_register_driver_common(drv, &xenbus_frontend, | 239 | ret = xenbus_register_driver_common(drv, &xenbus_frontend); |
241 | owner, mod_name); | ||
242 | if (ret) | 240 | if (ret) |
243 | return ret; | 241 | return ret; |
244 | 242 | ||
@@ -247,7 +245,7 @@ int __xenbus_register_frontend(struct xenbus_driver *drv, | |||
247 | 245 | ||
248 | return 0; | 246 | return 0; |
249 | } | 247 | } |
250 | EXPORT_SYMBOL_GPL(__xenbus_register_frontend); | 248 | EXPORT_SYMBOL_GPL(xenbus_register_frontend); |
251 | 249 | ||
252 | static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); | 250 | static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); |
253 | static int backend_state; | 251 | static int backend_state; |
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index b3b8f2f3ad10..d1c217b23a42 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
@@ -532,21 +532,18 @@ int xenbus_printf(struct xenbus_transaction t, | |||
532 | { | 532 | { |
533 | va_list ap; | 533 | va_list ap; |
534 | int ret; | 534 | int ret; |
535 | #define PRINTF_BUFFER_SIZE 4096 | 535 | char *buf; |
536 | char *printf_buffer; | ||
537 | |||
538 | printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); | ||
539 | if (printf_buffer == NULL) | ||
540 | return -ENOMEM; | ||
541 | 536 | ||
542 | va_start(ap, fmt); | 537 | va_start(ap, fmt); |
543 | ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); | 538 | buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); |
544 | va_end(ap); | 539 | va_end(ap); |
545 | 540 | ||
546 | BUG_ON(ret > PRINTF_BUFFER_SIZE-1); | 541 | if (!buf) |
547 | ret = xenbus_write(t, dir, node, printf_buffer); | 542 | return -ENOMEM; |
543 | |||
544 | ret = xenbus_write(t, dir, node, buf); | ||
548 | 545 | ||
549 | kfree(printf_buffer); | 546 | kfree(buf); |
550 | 547 | ||
551 | return ret; | 548 | return ret; |
552 | } | 549 | } |
@@ -621,15 +618,6 @@ static struct xenbus_watch *find_watch(const char *token) | |||
621 | return NULL; | 618 | return NULL; |
622 | } | 619 | } |
623 | 620 | ||
624 | static void xs_reset_watches(void) | ||
625 | { | ||
626 | int err; | ||
627 | |||
628 | err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); | ||
629 | if (err && err != -EEXIST) | ||
630 | printk(KERN_WARNING "xs_reset_watches failed: %d\n", err); | ||
631 | } | ||
632 | |||
633 | /* Register callback to watch this node. */ | 621 | /* Register callback to watch this node. */ |
634 | int register_xenbus_watch(struct xenbus_watch *watch) | 622 | int register_xenbus_watch(struct xenbus_watch *watch) |
635 | { | 623 | { |
@@ -810,6 +798,12 @@ static int process_msg(void) | |||
810 | goto out; | 798 | goto out; |
811 | } | 799 | } |
812 | 800 | ||
801 | if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { | ||
802 | kfree(msg); | ||
803 | err = -EINVAL; | ||
804 | goto out; | ||
805 | } | ||
806 | |||
813 | body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); | 807 | body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); |
814 | if (body == NULL) { | 808 | if (body == NULL) { |
815 | kfree(msg); | 809 | kfree(msg); |
@@ -906,9 +900,5 @@ int xs_init(void) | |||
906 | if (IS_ERR(task)) | 900 | if (IS_ERR(task)) |
907 | return PTR_ERR(task); | 901 | return PTR_ERR(task); |
908 | 902 | ||
909 | /* shutdown watches for kexec boot */ | ||
910 | if (xen_hvm_domain()) | ||
911 | xs_reset_watches(); | ||
912 | |||
913 | return 0; | 903 | return 0; |
914 | } | 904 | } |
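xenbus_printf() drops the fixed 4096-byte buffer and the BUG_ON() overflow check in favour of kvasprintf(), which sizes the allocation to the formatted result. The same pattern in isolation; demo_printf() is hypothetical and pr_info() stands in for xenbus_write():

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <stdarg.h>

    /* Hypothetical helper showing the kvasprintf() allocation pattern. */
    static int demo_printf(const char *fmt, ...)
    {
            va_list ap;
            char *buf;

            va_start(ap, fmt);
            buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
            va_end(ap);

            if (!buf)
                    return -ENOMEM;

            pr_info("demo: %s\n", buf);     /* real code would hand buf to xenbus_write() */
            kfree(buf);
            return 0;
    }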
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile index 4fde9440fe1f..b019865fcc56 100644 --- a/drivers/xen/xenfs/Makefile +++ b/drivers/xen/xenfs/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-$(CONFIG_XENFS) += xenfs.o | 1 | obj-$(CONFIG_XENFS) += xenfs.o |
2 | 2 | ||
3 | xenfs-y = super.o xenbus.o privcmd.o | 3 | xenfs-y = super.o |
4 | xenfs-$(CONFIG_XEN_DOM0) += xenstored.o | 4 | xenfs-$(CONFIG_XEN_DOM0) += xenstored.o |
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 1aa389719846..a84b53c01436 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <xen/xen.h> | 16 | #include <xen/xen.h> |
17 | 17 | ||
18 | #include "xenfs.h" | 18 | #include "xenfs.h" |
19 | #include "../privcmd.h" | ||
20 | #include "../xenbus/xenbus_comms.h" | ||
19 | 21 | ||
20 | #include <asm/xen/hypervisor.h> | 22 | #include <asm/xen/hypervisor.h> |
21 | 23 | ||
@@ -82,9 +84,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent) | |||
82 | { | 84 | { |
83 | static struct tree_descr xenfs_files[] = { | 85 | static struct tree_descr xenfs_files[] = { |
84 | [1] = {}, | 86 | [1] = {}, |
85 | { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR }, | 87 | { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR }, |
86 | { "capabilities", &capabilities_file_ops, S_IRUGO }, | 88 | { "capabilities", &capabilities_file_ops, S_IRUGO }, |
87 | { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR }, | 89 | { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR }, |
88 | {""}, | 90 | {""}, |
89 | }; | 91 | }; |
90 | int rc; | 92 | int rc; |
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h index b68aa6200003..6b80c7779c02 100644 --- a/drivers/xen/xenfs/xenfs.h +++ b/drivers/xen/xenfs/xenfs.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _XENFS_XENBUS_H | 1 | #ifndef _XENFS_XENBUS_H |
2 | #define _XENFS_XENBUS_H | 2 | #define _XENFS_XENBUS_H |
3 | 3 | ||
4 | extern const struct file_operations xenbus_file_ops; | ||
5 | extern const struct file_operations privcmd_file_ops; | ||
6 | extern const struct file_operations xsd_kva_file_ops; | 4 | extern const struct file_operations xsd_kva_file_ops; |
7 | extern const struct file_operations xsd_port_file_ops; | 5 | extern const struct file_operations xsd_port_file_ops; |
8 | 6 | ||