author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 11:16:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 11:16:51 -0400
commit     9d2da7af909e1cf529f3cac582aaae05b107aa1e (patch)
tree       d3845268b2f6ee22e928ad1ec751d74e121dd797 /drivers/xen
parent     c1be5a5b1b355d40e6cf79cc979eb66dafa24ad1 (diff)
parent     18c0025b692a293e3e4aecb34264563c0a442448 (diff)
Merge tag 'stable/for-linus-3.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen updates from Konrad Rzeszutek Wilk:
 "Features:
   - Populate the boot_params with EDD data.
   - Cleanups in the IRQ code.

  Bug-fixes:
   - CPU hotplug offline/online in PVHVM mode.
   - Re-upload processor PM data after ACPI S3 suspend/resume cycle."

And Konrad gets a gold star for sending the pull request early when he
thought he'd be away for the first week of the merge window (but because
of 3.9 dragging out to -rc8 he then re-sent the reminder on the first day
of the merge window anyway).

* tag 'stable/for-linus-3.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: resolve section mismatch warnings in xen-acpi-processor
  xen: Re-upload processor PM data to hypervisor after S3 resume (v2)
  xen/smp: Unifiy some of the PVs and PVHVM offline CPU path
  xen/smp/pvhvm: Don't initialize IRQ_WORKER as we are using the native one.
  xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM
  xen/spinlock: Check against default value of -1 for IRQ line.
  xen/time: Add default value of -1 for IRQ and check for that.
  xen/events: Check that IRQ value passed in is valid.
  xen/time: Fix kasprintf splat when allocating timer%d IRQ line.
  xen/smp/spinlock: Fix leakage of the spinlock interrupt line for every CPU online/offline
  xen/smp: Fix leakage of timer interrupt line for every CPU online/offline.
  xen kconfig: fix select INPUT_XEN_KBDDEV_FRONTEND
  xen: drop tracking of IRQ vector
  x86/xen: populate boot_params with EDD data
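One of the bug-fixes above, re-uploading processor PM data after an ACPI S3 cycle, hangs off the kernel's syscore resume hook; the real change is in the xen-acpi-processor.c diff below. The following is only a minimal, self-contained sketch of that registration pattern, where xen_upload_pm_all() is a hypothetical stand-in for the driver's actual upload path:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>

/* Hypothetical stand-in for the driver's real "push PM data" routine. */
static void xen_upload_pm_all(void)
{
        pr_info("re-uploading processor PM data to the hypervisor\n");
}

/* S3 wipes the hypervisor's copy of the ACPI PM data, so push it again. */
static void xen_pm_sketch_resume(void)
{
        xen_upload_pm_all();
}

static struct syscore_ops xen_pm_sketch_syscore_ops = {
        .resume = xen_pm_sketch_resume,
};

static int __init xen_pm_sketch_init(void)
{
        register_syscore_ops(&xen_pm_sketch_syscore_ops);
        return 0;
}

static void __exit xen_pm_sketch_exit(void)
{
        unregister_syscore_ops(&xen_pm_sketch_syscore_ops);
}

module_init(xen_pm_sketch_init);
module_exit(xen_pm_sketch_exit);
MODULE_LICENSE("GPL");

A syscore op is a single system-wide hook rather than a per-device callback, which fits this case where the state being restored is owned by the hypervisor, not by any one device.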
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/events.c              33
-rw-r--r--  drivers/xen/xen-acpi-processor.c  82
2 files changed, 78 insertions, 37 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 2647ad8e1f19..d8cc8127f19c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -85,8 +85,7 @@ enum xen_irq_type {
  *    event channel - irq->event channel mapping
  *    cpu - cpu this event channel is bound to
  *    index - type-specific information:
- *       PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
- *              guest, or GSI (real passthrough IRQ) of the device.
+ *       PIRQ - physical IRQ, GSI, flags, and owner domain
  *       VIRQ - virq number
  *       IPI - IPI vector
  *       EVTCHN -
@@ -105,7 +104,6 @@ struct irq_info {
                 struct {
                         unsigned short pirq;
                         unsigned short gsi;
-                        unsigned char vector;
                         unsigned char flags;
                         uint16_t domid;
                 } pirq;
@@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
                                     unsigned short evtchn,
                                     unsigned short pirq,
                                     unsigned short gsi,
-                                    unsigned short vector,
                                     uint16_t domid,
                                     unsigned char flags)
 {
@@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
 
         info->u.pirq.pirq = pirq;
         info->u.pirq.gsi = gsi;
-        info->u.pirq.vector = vector;
         info->u.pirq.domid = domid;
         info->u.pirq.flags = flags;
 }
@@ -519,6 +515,9 @@ static void xen_free_irq(unsigned irq)
 {
         struct irq_info *info = irq_get_handler_data(irq);
 
+        if (WARN_ON(!info))
+                return;
+
         list_del(&info->list);
 
         irq_set_handler_data(irq, NULL);
@@ -714,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                 goto out;
         }
 
-        xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
+        xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
                                shareable ? PIRQ_SHAREABLE : 0);
 
         pirq_query_unmask(irq);
@@ -762,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 }
 
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                             int pirq, int vector, const char *name,
-                             domid_t domid)
+                             int pirq, const char *name, domid_t domid)
 {
         int irq, ret;
 
@@ -776,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
         irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
                                       name);
 
-        xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
+        xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
         ret = irq_set_msi_desc(irq, msidesc);
         if (ret < 0)
                 goto error_irq;
@@ -1008,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq)
         int evtchn = evtchn_from_irq(irq);
         struct irq_info *info = irq_get_handler_data(irq);
 
+        if (WARN_ON(!info))
+                return;
+
         mutex_lock(&irq_mapping_update_lock);
 
         if (info->refcnt > 0) {
@@ -1135,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
+        struct irq_info *info = irq_get_handler_data(irq);
+
+        if (WARN_ON(!info))
+                return;
         free_irq(irq, dev_id);
         unbind_from_irq(irq);
 }
@@ -1457,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 {
         struct irq_info *info = info_for_irq(irq);
 
+        if (WARN_ON(!info))
+                return;
+
         /* Make sure the irq is masked, since the new event channel
            will also be masked. */
         disable_irq(irq);
@@ -1730,7 +1738,12 @@ void xen_poll_irq(int irq)
 int xen_test_irq_shared(int irq)
 {
         struct irq_info *info = info_for_irq(irq);
-        struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+        struct physdev_irq_status_query irq_status;
+
+        if (WARN_ON(!info))
+                return -ENOENT;
+
+        irq_status.irq = info->u.pirq.pirq;
 
         if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                 return 0;
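The events.c hunks above repeat one defensive pattern: look up the per-IRQ bookkeeping first, and warn plus bail out when it is missing instead of dereferencing a NULL pointer for an IRQ that was never bound (or has already been torn down). Here is a self-contained sketch of that guard only, with trimmed stand-ins: struct irq_info below is not the driver's real layout, and lookup_info() stands in for irq_get_handler_data()/info_for_irq().

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>

struct irq_info { int refcnt; };        /* trimmed stand-in, not the real layout */

/* Stand-in for irq_get_handler_data()/info_for_irq() in the real driver. */
static struct irq_info *lookup_info(unsigned int irq)
{
        return NULL;                    /* simulate an IRQ that was never bound */
}

static int example_irq_op(unsigned int irq)
{
        struct irq_info *info = lookup_info(irq);

        /* Log a backtrace and return an error instead of oopsing on NULL. */
        if (WARN_ON(!info))
                return -ENOENT;

        /* ... operate on info exactly as the pre-patch code did ... */
        return info->refcnt;
}

In the void helpers above (xen_free_irq(), unbind_from_irq(), unbind_from_irqhandler(), rebind_evtchn_irq()) the same check simply returns; only xen_test_irq_shared() reports -ENOENT to its caller.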
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 90e34ac7e522..8abd7d579037 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -25,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <acpi/processor.h>
@@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex);
 /* Which ACPI ID we have processed from 'struct acpi_processor'. */
 static unsigned long *acpi_ids_done;
 /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */
-static unsigned long __initdata *acpi_id_present;
+static unsigned long *acpi_id_present;
 /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
-static unsigned long __initdata *acpi_id_cst_present;
+static unsigned long *acpi_id_cst_present;
 
 static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
 {
@@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void)
  * for_each_[present|online]_cpu macros which are banded to the virtual
  * CPU amount.
  */
-static acpi_status __init
+static acpi_status
 read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
         u32 acpi_id;
@@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
 
         return AE_OK;
 }
-static int __init check_acpi_ids(struct acpi_processor *pr_backup)
+static int check_acpi_ids(struct acpi_processor *pr_backup)
 {
 
         if (!pr_backup)
                 return -ENODEV;
 
+        if (acpi_id_present && acpi_id_cst_present)
+                /* OK, done this once .. skip to uploading */
+                goto upload;
+
         /* All online CPUs have been processed at this stage. Now verify
          * whether in fact "online CPUs" == physical CPUs.
          */
@@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
                             read_acpi_id, NULL, NULL, NULL);
         acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
 
+upload:
         if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
                 unsigned int i;
                 for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
@@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
                         (void)upload_pm_data(pr_backup);
                 }
         }
-        kfree(acpi_id_present);
-        acpi_id_present = NULL;
-        kfree(acpi_id_cst_present);
-        acpi_id_cst_present = NULL;
+
         return 0;
 }
 static int __init check_prereq(void)
@@ -467,10 +470,47 @@ static void free_acpi_perf_data(void)
         free_percpu(acpi_perf_data);
 }
 
-static int __init xen_acpi_processor_init(void)
+static int xen_upload_processor_pm_data(void)
 {
         struct acpi_processor *pr_backup = NULL;
         unsigned int i;
+        int rc = 0;
+
+        pr_info(DRV_NAME "Uploading Xen processor PM info\n");
+
+        for_each_possible_cpu(i) {
+                struct acpi_processor *_pr;
+                _pr = per_cpu(processors, i /* APIC ID */);
+                if (!_pr)
+                        continue;
+
+                if (!pr_backup) {
+                        pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+                        if (pr_backup)
+                                memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
+                }
+                (void)upload_pm_data(_pr);
+        }
+
+        rc = check_acpi_ids(pr_backup);
+        kfree(pr_backup);
+
+        return rc;
+}
+
+static void xen_acpi_processor_resume(void)
+{
+        bitmap_zero(acpi_ids_done, nr_acpi_bits);
+        xen_upload_processor_pm_data();
+}
+
+static struct syscore_ops xap_syscore_ops = {
+        .resume = xen_acpi_processor_resume,
+};
+
+static int __init xen_acpi_processor_init(void)
+{
+        unsigned int i;
         int rc = check_prereq();
 
         if (rc)
@@ -514,27 +554,12 @@ static int __init xen_acpi_processor_init(void)
                 goto err_out;
         }
 
-        for_each_possible_cpu(i) {
-                struct acpi_processor *_pr;
-                _pr = per_cpu(processors, i /* APIC ID */);
-                if (!_pr)
-                        continue;
-
-                if (!pr_backup) {
-                        pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
-                        if (pr_backup)
-                                memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
-                }
-                (void)upload_pm_data(_pr);
-        }
-        rc = check_acpi_ids(pr_backup);
-
-        kfree(pr_backup);
-        pr_backup = NULL;
-
+        rc = xen_upload_processor_pm_data();
         if (rc)
                 goto err_unregister;
 
+        register_syscore_ops(&xap_syscore_ops);
+
         return 0;
 err_unregister:
         for_each_possible_cpu(i) {
@@ -552,7 +577,10 @@ static void __exit xen_acpi_processor_exit(void)
 {
         int i;
 
+        unregister_syscore_ops(&xap_syscore_ops);
         kfree(acpi_ids_done);
+        kfree(acpi_id_present);
+        kfree(acpi_id_cst_present);
         for_each_possible_cpu(i) {
                 struct acpi_processor_performance *perf;
                 perf = per_cpu_ptr(acpi_perf_data, i);