Diffstat (limited to 'drivers/xen')
-rw-r--r--    drivers/xen/events.c    30
-rw-r--r--    drivers/xen/gntdev.c     6
2 files changed, 19 insertions, 17 deletions
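
The events.c side of the patch is a mechanical switch from the old genirq accessors (set_irq_data()/get_irq_data(), set_irq_chip_and_handler_name()) to their irq_-prefixed replacements, which take the same arguments. A minimal before/after sketch of the rename applied in the hunks below, not taken from this patch (example_chip, example_irq_setup() and example_irq_cookie() are made-up names):

#include <linux/irq.h>

static struct irq_chip example_chip;    /* hypothetical chip, for illustration only */

static void example_irq_setup(unsigned int irq, void *cookie)
{
        /* old: set_irq_chip_and_handler_name(irq, &example_chip,
         *                                    handle_level_irq, "example"); */
        irq_set_chip_and_handler_name(irq, &example_chip, handle_level_irq,
                                      "example");

        /* old: set_irq_data(irq, cookie); */
        irq_set_handler_data(irq, cookie);
}

static void *example_irq_cookie(unsigned int irq)
{
        /* old: return get_irq_data(irq); */
        return irq_get_handler_data(irq);
}
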
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 02b5a9c05cfa..42d6c930cc87 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -5,7 +5,7 @@
  * domain gets 1024 event channels, but NR_IRQ is not that large, we
  * must dynamically map irqs<->event channels. The event channels
  * interface with the rest of the kernel by defining a xen interrupt
- * chip. When an event is recieved, it is mapped to an irq and sent
+ * chip. When an event is received, it is mapped to an irq and sent
  * through the normal interrupt processing path.
  *
  * There are four kinds of events which can be mapped to an event
@@ -122,7 +122,7 @@ static struct irq_chip xen_pirq_chip;
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
 {
-        return get_irq_data(irq);
+        return irq_get_handler_data(irq);
 }
 
 /* Constructors for packed IRQ information. */
@@ -403,7 +403,7 @@ static void xen_irq_init(unsigned irq)
 
         info->type = IRQT_UNBOUND;
 
-        set_irq_data(irq, info);
+        irq_set_handler_data(irq, info);
 
         list_add_tail(&info->list, &xen_irq_list_head);
 }
@@ -416,7 +416,7 @@ static int __must_check xen_allocate_irq_dynamic(void)
 #ifdef CONFIG_X86_IO_APIC
         /*
          * For an HVM guest or domain 0 which see "real" (emulated or
-         * actual repectively) GSIs we allocate dynamic IRQs
+         * actual respectively) GSIs we allocate dynamic IRQs
          * e.g. those corresponding to event channels or MSIs
          * etc. from the range above those "real" GSIs to avoid
          * collisions.
@@ -458,11 +458,11 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 
 static void xen_free_irq(unsigned irq)
 {
-        struct irq_info *info = get_irq_data(irq);
+        struct irq_info *info = irq_get_handler_data(irq);
 
         list_del(&info->list);
 
-        set_irq_data(irq, NULL);
+        irq_set_handler_data(irq, NULL);
 
         kfree(info);
 
@@ -585,7 +585,7 @@ static void ack_pirq(struct irq_data *data)
 {
         int evtchn = evtchn_from_irq(data->irq);
 
-        move_native_irq(data->irq);
+        irq_move_irq(data);
 
         if (VALID_EVTCHN(evtchn)) {
                 mask_evtchn(evtchn);
@@ -639,8 +639,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
         if (irq < 0)
                 goto out;
 
-        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
-                                      handle_level_irq, name);
+        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
+                                      name);
 
         irq_op.irq = irq;
         irq_op.vector = 0;
@@ -690,8 +690,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
         if (irq == -1)
                 goto out;
 
-        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
-                                      handle_level_irq, name);
+        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
+                                      name);
 
         xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
         ret = irq_set_msi_desc(irq, msidesc);
@@ -772,7 +772,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                 if (irq == -1)
                         goto out;
 
-                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
                                               handle_fasteoi_irq, "event");
 
                 xen_irq_info_evtchn_init(irq, evtchn);
@@ -799,7 +799,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                 if (irq < 0)
                         goto out;
 
-                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                               handle_percpu_irq, "ipi");
 
                 bind_ipi.vcpu = cpu;
@@ -848,7 +848,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                 if (irq == -1)
                         goto out;
 
-                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                               handle_percpu_irq, "virq");
 
                 bind_virq.virq = virq;
@@ -1339,7 +1339,7 @@ static void ack_dynirq(struct irq_data *data)
 {
         int evtchn = evtchn_from_irq(data->irq);
 
-        move_masked_irq(data->irq);
+        irq_move_masked_irq(data);
 
         if (VALID_EVTCHN(evtchn))
                 unmask_evtchn(evtchn);
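
The ack_pirq()/ack_dynirq() hunks above follow the same conversion for the IRQ-migration helpers: irq_move_irq() and irq_move_masked_irq() take the struct irq_data * that the irq_chip callback already receives, instead of the bare irq number that move_native_irq()/move_masked_irq() wanted. A rough sketch of that callback shape (example_ack() is hypothetical, not part of this patch):

#include <linux/irq.h>

static void example_ack(struct irq_data *data)
{
        /* old: move_native_irq(data->irq); */
        irq_move_irq(data);     /* act on any pending affinity change */

        /* chip-specific acknowledge would follow, keyed off data->irq */
}
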
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 017ce600fbc6..b0f9e8fb0052 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -273,7 +273,7 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
                                 map->vma->vm_start + map->notify.addr;
                 err = copy_to_user(tmp, &err, 1);
                 if (err)
-                        return err;
+                        return -EFAULT;
                 map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
         } else if (pgno >= offset && pgno < offset + pages) {
                 uint8_t *tmp = kmap(map->pages[pgno]);
@@ -662,7 +662,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
         if (map->flags) {
                 if ((vma->vm_flags & VM_WRITE) &&
                     (map->flags & GNTMAP_readonly))
-                        return -EINVAL;
+                        goto out_unlock_put;
         } else {
                 map->flags = GNTMAP_host_map;
                 if (!(vma->vm_flags & VM_WRITE))
@@ -700,6 +700,8 @@ unlock_out:
         spin_unlock(&priv->lock);
         return err;
 
+out_unlock_put:
+        spin_unlock(&priv->lock);
 out_put_map:
         if (use_ptemod)
                 map->vma = NULL;
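
The gntdev.c hunks fix two error paths. First, copy_to_user() returns the number of bytes it could not copy rather than an errno, so a non-zero result has to be converted to -EFAULT instead of being returned as-is. Second, the read-only check in gntdev_mmap() now bails out while priv->lock is held, so it needs an out_unlock_put label that drops the lock before falling through to the existing out_put_map cleanup. A rough sketch of both patterns, with made-up example_* names standing in for the real gntdev types:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_map {                    /* hypothetical, for illustration only */
        spinlock_t lock;
};

static void example_put(struct example_map *map)
{
        /* stand-in for the real "drop the reference to the map" step */
}

static int example_clear_user_byte(uint8_t __user *addr)
{
        uint8_t zero = 0;

        /* copy_to_user() reports how many bytes were NOT copied, not an errno */
        if (copy_to_user(addr, &zero, 1))
                return -EFAULT;
        return 0;
}

static int example_start(struct example_map *map, bool bad_flags)
{
        int err = 0;

        spin_lock(&map->lock);
        if (bad_flags) {
                err = -EINVAL;
                goto out_unlock_put;    /* must not reach cleanup with the lock held */
        }
        spin_unlock(&map->lock);
        return 0;

out_unlock_put:
        spin_unlock(&map->lock);
        example_put(map);
        return err;
}
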