diff options
-rw-r--r-- | arch/x86/xen/mmu.c | 2 | ||||
-rw-r--r-- | arch/x86/xen/p2m.c | 30 | ||||
-rw-r--r-- | arch/x86/xen/setup.c | 6 | ||||
-rw-r--r-- | drivers/xen/events.c | 113 |
4 files changed, 107 insertions, 44 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index b5f776f60b1b..02d752460371 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info) | |||
1187 | 1187 | ||
1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); | 1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); |
1189 | 1189 | ||
1190 | if (active_mm == mm) | 1190 | if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) |
1191 | leave_mm(smp_processor_id()); | 1191 | leave_mm(smp_processor_id()); |
1192 | 1192 | ||
1193 | /* If this cpu still has a stale cr3 reference, then make sure | 1193 | /* If this cpu still has a stale cr3 reference, then make sure |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index c851397e657c..58efeb9d5440 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn) | |||
522 | /* Boundary cross-over for the edges: */ | 522 | /* Boundary cross-over for the edges: */ |
523 | if (idx) { | 523 | if (idx) { |
524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); | 524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); |
525 | unsigned long *mid_mfn_p; | ||
525 | 526 | ||
526 | p2m_init(p2m); | 527 | p2m_init(p2m); |
527 | 528 | ||
528 | p2m_top[topidx][mididx] = p2m; | 529 | p2m_top[topidx][mididx] = p2m; |
529 | 530 | ||
531 | /* For save/restore we need the MFN of the P2M saved */ ||
532 | |||
533 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
534 | WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), | ||
535 | "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", | ||
536 | topidx, mididx); | ||
537 | mid_mfn_p[mididx] = virt_to_mfn(p2m); | ||
538 | |||
530 | } | 539 | } |
531 | return idx != 0; | 540 | return idx != 0; |
532 | } | 541 | } |
@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, | |||
549 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) | 558 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) |
550 | { | 559 | { |
551 | unsigned topidx = p2m_top_index(pfn); | 560 | unsigned topidx = p2m_top_index(pfn); |
552 | if (p2m_top[topidx] == p2m_mid_missing) { | 561 | unsigned long *mid_mfn_p; |
553 | unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | 562 | unsigned long **mid; |
563 | |||
564 | mid = p2m_top[topidx]; | ||
565 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
566 | if (mid == p2m_mid_missing) { | ||
567 | mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
554 | 568 | ||
555 | p2m_mid_init(mid); | 569 | p2m_mid_init(mid); |
556 | 570 | ||
557 | p2m_top[topidx] = mid; | 571 | p2m_top[topidx] = mid; |
572 | |||
573 | BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); | ||
574 | } | ||
575 | /* And the save/restore P2M tables.. */ | ||
576 | if (mid_mfn_p == p2m_mid_missing_mfn) { | ||
577 | mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
578 | p2m_mid_mfn_init(mid_mfn_p); | ||
579 | |||
580 | p2m_top_mfn_p[topidx] = mid_mfn_p; | ||
581 | p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); | ||
582 | /* Note: we don't set mid_mfn_p[mididx] here, ||
583 | * look in __early_alloc_p2m */ | ||
558 | } | 584 | } |
559 | } | 585 | } |
560 | 586 | ||
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index d3663df2f967..be1a464f6d66 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, | |||
166 | if (last > end) | 166 | if (last > end) |
167 | continue; | 167 | continue; |
168 | 168 | ||
169 | if (entry->type == E820_RAM) { | 169 | if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { |
170 | if (start > start_pci) | 170 | if (start > start_pci) |
171 | identity += set_phys_range_identity( | 171 | identity += set_phys_range_identity( |
172 | PFN_UP(start_pci), PFN_DOWN(start)); | 172 | PFN_UP(start_pci), PFN_DOWN(start)); |
@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void) | |||
227 | 227 | ||
228 | memcpy(map_raw, map, sizeof(map)); | 228 | memcpy(map_raw, map, sizeof(map)); |
229 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
230 | #ifdef CONFIG_X86_32 | ||
231 | xen_extra_mem_start = mem_end; | ||
232 | #else | ||
230 | xen_extra_mem_start = max((1ULL << 32), mem_end); | 233 | xen_extra_mem_start = max((1ULL << 32), mem_end); |
234 | #endif | ||
231 | for (i = 0; i < memmap.nr_entries; i++) { | 235 | for (i = 0; i < memmap.nr_entries; i++) { |
232 | unsigned long long end; | 236 | unsigned long long end; |
233 | 237 | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 35e02a10110b..3ff822b48145 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -119,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], | |||
119 | static struct irq_chip xen_dynamic_chip; | 119 | static struct irq_chip xen_dynamic_chip; |
120 | static struct irq_chip xen_percpu_chip; | 120 | static struct irq_chip xen_percpu_chip; |
121 | static struct irq_chip xen_pirq_chip; | 121 | static struct irq_chip xen_pirq_chip; |
122 | static void enable_dynirq(struct irq_data *data); | ||
123 | static void disable_dynirq(struct irq_data *data); | ||
122 | 124 | ||
123 | /* Get info for IRQ */ | 125 | /* Get info for IRQ */ |
124 | static struct irq_info *info_for_irq(unsigned irq) | 126 | static struct irq_info *info_for_irq(unsigned irq) |
@@ -476,16 +478,6 @@ static void xen_free_irq(unsigned irq) | |||
476 | irq_free_desc(irq); | 478 | irq_free_desc(irq); |
477 | } | 479 | } |
478 | 480 | ||
479 | static void pirq_unmask_notify(int irq) | ||
480 | { | ||
481 | struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) }; | ||
482 | |||
483 | if (unlikely(pirq_needs_eoi(irq))) { | ||
484 | int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||
485 | WARN_ON(rc); | ||
486 | } | ||
487 | } | ||
488 | |||
489 | static void pirq_query_unmask(int irq) | 481 | static void pirq_query_unmask(int irq) |
490 | { | 482 | { |
491 | struct physdev_irq_status_query irq_status; | 483 | struct physdev_irq_status_query irq_status; |
@@ -509,6 +501,29 @@ static bool probing_irq(int irq) | |||
509 | return desc && desc->action == NULL; | 501 | return desc && desc->action == NULL; |
510 | } | 502 | } |
511 | 503 | ||
504 | static void eoi_pirq(struct irq_data *data) | ||
505 | { | ||
506 | int evtchn = evtchn_from_irq(data->irq); | ||
507 | struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; | ||
508 | int rc = 0; | ||
509 | |||
510 | irq_move_irq(data); | ||
511 | |||
512 | if (VALID_EVTCHN(evtchn)) | ||
513 | clear_evtchn(evtchn); | ||
514 | |||
515 | if (pirq_needs_eoi(data->irq)) { | ||
516 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||
517 | WARN_ON(rc); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | static void mask_ack_pirq(struct irq_data *data) | ||
522 | { | ||
523 | disable_dynirq(data); | ||
524 | eoi_pirq(data); | ||
525 | } | ||
526 | |||
512 | static unsigned int __startup_pirq(unsigned int irq) | 527 | static unsigned int __startup_pirq(unsigned int irq) |
513 | { | 528 | { |
514 | struct evtchn_bind_pirq bind_pirq; | 529 | struct evtchn_bind_pirq bind_pirq; |
@@ -542,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
542 | 557 | ||
543 | out: | 558 | out: |
544 | unmask_evtchn(evtchn); | 559 | unmask_evtchn(evtchn); |
545 | pirq_unmask_notify(irq); | 560 | eoi_pirq(irq_get_irq_data(irq)); |
546 | 561 | ||
547 | return 0; | 562 | return 0; |
548 | } | 563 | } |
@@ -582,18 +597,7 @@ static void enable_pirq(struct irq_data *data) | |||
582 | 597 | ||
583 | static void disable_pirq(struct irq_data *data) | 598 | static void disable_pirq(struct irq_data *data) |
584 | { | 599 | { |
585 | } | 600 | disable_dynirq(data); |
586 | |||
587 | static void ack_pirq(struct irq_data *data) | ||
588 | { | ||
589 | int evtchn = evtchn_from_irq(data->irq); | ||
590 | |||
591 | irq_move_irq(data); | ||
592 | |||
593 | if (VALID_EVTCHN(evtchn)) { | ||
594 | mask_evtchn(evtchn); | ||
595 | clear_evtchn(evtchn); | ||
596 | } | ||
597 | } | 601 | } |
598 | 602 | ||
599 | static int find_irq_by_gsi(unsigned gsi) | 603 | static int find_irq_by_gsi(unsigned gsi) |
@@ -642,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
642 | if (irq < 0) | 646 | if (irq < 0) |
643 | goto out; | 647 | goto out; |
644 | 648 | ||
645 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, | ||
646 | name); | ||
647 | |||
648 | irq_op.irq = irq; | 649 | irq_op.irq = irq; |
649 | irq_op.vector = 0; | 650 | irq_op.vector = 0; |
650 | 651 | ||
@@ -661,6 +662,32 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
661 | xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, | 662 | xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, |
662 | shareable ? PIRQ_SHAREABLE : 0); | 663 | shareable ? PIRQ_SHAREABLE : 0); |
663 | 664 | ||
665 | pirq_query_unmask(irq); | ||
666 | /* We try to use the handler with the appropriate semantic for the | ||
667 | * type of interrupt: if the interrupt doesn't need an eoi | ||
668 | * (pirq_needs_eoi returns false), we treat it like an edge | ||
669 | * triggered interrupt so we use handle_edge_irq. | ||
670 | * As a matter of fact this only happens when the corresponding | ||
671 | * physical interrupt is edge triggered or an msi. | ||
672 | * | ||
673 | * On the other hand if the interrupt needs an eoi (pirq_needs_eoi | ||
674 | * returns true) we treat it like a level triggered interrupt so we | ||
675 | * use handle_fasteoi_irq like the native code does for this kind of | ||
676 | * interrupts. | ||
677 | * Depending on the Xen version, pirq_needs_eoi might return true | ||
678 | * not only for level triggered interrupts but for edge triggered | ||
679 | * interrupts too. In any case Xen always honors the eoi mechanism, | ||
680 | * not injecting any more pirqs of the same kind if the first one | ||
681 | * hasn't received an eoi yet. Therefore using the fasteoi handler | ||
682 | * is the right choice either way. | ||
683 | */ | ||
684 | if (pirq_needs_eoi(irq)) | ||
685 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, | ||
686 | handle_fasteoi_irq, name); | ||
687 | else | ||
688 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, | ||
689 | handle_edge_irq, name); | ||
690 | |||
664 | out: | 691 | out: |
665 | spin_unlock(&irq_mapping_update_lock); | 692 | spin_unlock(&irq_mapping_update_lock); |
666 | 693 | ||
@@ -694,8 +721,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
694 | if (irq == -1) | 721 | if (irq == -1) |
695 | goto out; | 722 | goto out; |
696 | 723 | ||
697 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, | 724 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, |
698 | name); | 725 | name); |
699 | 726 | ||
700 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); | 727 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); |
701 | ret = irq_set_msi_desc(irq, msidesc); | 728 | ret = irq_set_msi_desc(irq, msidesc); |
@@ -790,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
790 | goto out; | 817 | goto out; |
791 | 818 | ||
792 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, | 819 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
793 | handle_fasteoi_irq, "event"); | 820 | handle_edge_irq, "event"); |
794 | 821 | ||
795 | xen_irq_info_evtchn_init(irq, evtchn); | 822 | xen_irq_info_evtchn_init(irq, evtchn); |
796 | } | 823 | } |
@@ -1196,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void) | |||
1196 | port = (word_idx * BITS_PER_LONG) + bit_idx; | 1223 | port = (word_idx * BITS_PER_LONG) + bit_idx; |
1197 | irq = evtchn_to_irq[port]; | 1224 | irq = evtchn_to_irq[port]; |
1198 | 1225 | ||
1199 | mask_evtchn(port); | ||
1200 | clear_evtchn(port); | ||
1201 | |||
1202 | if (irq != -1) { | 1226 | if (irq != -1) { |
1203 | desc = irq_to_desc(irq); | 1227 | desc = irq_to_desc(irq); |
1204 | if (desc) | 1228 | if (desc) |
@@ -1354,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data) | |||
1354 | { | 1378 | { |
1355 | int evtchn = evtchn_from_irq(data->irq); | 1379 | int evtchn = evtchn_from_irq(data->irq); |
1356 | 1380 | ||
1357 | irq_move_masked_irq(data); | 1381 | irq_move_irq(data); |
1358 | 1382 | ||
1359 | if (VALID_EVTCHN(evtchn)) | 1383 | if (VALID_EVTCHN(evtchn)) |
1360 | unmask_evtchn(evtchn); | 1384 | clear_evtchn(evtchn); |
1385 | } | ||
1386 | |||
1387 | static void mask_ack_dynirq(struct irq_data *data) | ||
1388 | { | ||
1389 | disable_dynirq(data); | ||
1390 | ack_dynirq(data); | ||
1361 | } | 1391 | } |
1362 | 1392 | ||
1363 | static int retrigger_dynirq(struct irq_data *data) | 1393 | static int retrigger_dynirq(struct irq_data *data) |
@@ -1564,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { | |||
1564 | .irq_mask = disable_dynirq, | 1594 | .irq_mask = disable_dynirq, |
1565 | .irq_unmask = enable_dynirq, | 1595 | .irq_unmask = enable_dynirq, |
1566 | 1596 | ||
1567 | .irq_eoi = ack_dynirq, | 1597 | .irq_ack = ack_dynirq, |
1598 | .irq_mask_ack = mask_ack_dynirq, | ||
1599 | |||
1568 | .irq_set_affinity = set_affinity_irq, | 1600 | .irq_set_affinity = set_affinity_irq, |
1569 | .irq_retrigger = retrigger_dynirq, | 1601 | .irq_retrigger = retrigger_dynirq, |
1570 | }; | 1602 | }; |
@@ -1574,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = { | |||
1574 | 1606 | ||
1575 | .irq_startup = startup_pirq, | 1607 | .irq_startup = startup_pirq, |
1576 | .irq_shutdown = shutdown_pirq, | 1608 | .irq_shutdown = shutdown_pirq, |
1577 | |||
1578 | .irq_enable = enable_pirq, | 1609 | .irq_enable = enable_pirq, |
1579 | .irq_unmask = enable_pirq, | ||
1580 | |||
1581 | .irq_disable = disable_pirq, | 1610 | .irq_disable = disable_pirq, |
1582 | .irq_mask = disable_pirq, | ||
1583 | 1611 | ||
1584 | .irq_ack = ack_pirq, | 1612 | .irq_mask = disable_dynirq, |
1613 | .irq_unmask = enable_dynirq, | ||
1614 | |||
1615 | .irq_ack = eoi_pirq, | ||
1616 | .irq_eoi = eoi_pirq, | ||
1617 | .irq_mask_ack = mask_ack_pirq, | ||
1585 | 1618 | ||
1586 | .irq_set_affinity = set_affinity_irq, | 1619 | .irq_set_affinity = set_affinity_irq, |
1587 | 1620 | ||