Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig          |   23
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile         |    2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c            |   20
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c            |   82
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c     |   22
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c    |    5
-rw-r--r--  arch/powerpc/platforms/pseries/io_event_irq.c   |  231
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c          |  117
-rw-r--r--  arch/powerpc/platforms/pseries/kexec.c          |    5
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c           |   48
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h |   27
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c            |    6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c          |   50
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c            |   24
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c           |  949
-rw-r--r--  arch/powerpc/platforms/pseries/xics.h           |   23
16 files changed, 518 insertions, 1116 deletions
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5b3da4b4ea79..71af4c5d6c05 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,7 +3,10 @@ config PPC_PSERIES
 	bool "IBM pSeries & new (POWER5-based) iSeries"
 	select MPIC
 	select PCI_MSI
-	select XICS
+	select PPC_XICS
+	select PPC_ICP_NATIVE
+	select PPC_ICP_HV
+	select PPC_ICS_RTAS
 	select PPC_I8259
 	select PPC_RTAS
 	select PPC_RTAS_DAEMON
@@ -47,6 +50,24 @@ config SCANLOG
 	tristate "Scanlog dump interface"
 	depends on RTAS_PROC && PPC_PSERIES
 
+config IO_EVENT_IRQ
+	bool "IO Event Interrupt support"
+	depends on PPC_PSERIES
+	default y
+	help
+	  Select this option if you want to enable support for IO Event
+	  interrupts. IO event interrupt is a mechanism provided by RTAS
+	  to return information about hardware error and non-error events
+	  which may need OS attention. RTAS returns events for multiple
+	  event types and scopes. Device drivers can register their handlers
+	  to receive events.
+
+	  This option will only enable the IO event platform code. You
+	  will still need to enable or compile the actual drivers
+	  that use this infrastructure to handle IO event interrupts.
+
+	  Say Y if you are unsure.
+
 config LPARCFG
 	bool "LPAR Configuration Data"
 	depends on PPC_PSERIES || PPC_ISERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fc5237810ece..3556e402cbf5 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -5,7 +5,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
 			   setup.o iommu.o event_sources.o ras.o \
 			   firmware.o power.o dlpar.o mobility.o
 obj-$(CONFIG_SMP)	+= smp.o
-obj-$(CONFIG_XICS)	+= xics.o
 obj-$(CONFIG_SCANLOG)	+= scanlog.o
 obj-$(CONFIG_EEH)	+= eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
 obj-$(CONFIG_KEXEC)	+= kexec.o
@@ -22,6 +21,7 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
 obj-$(CONFIG_PHYP_DUMP)	+= phyp_dump.o
 obj-$(CONFIG_CMM)	+= cmm.o
 obj-$(CONFIG_DTL)	+= dtl.o
+obj-$(CONFIG_IO_EVENT_IRQ)	+= io_event_irq.o
 
 ifeq ($(CONFIG_PPC_PSERIES),y)
 obj-$(CONFIG_SUSPEND)	+= suspend.o
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index c371bc06434b..e9190073bb97 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -52,10 +52,10 @@ static u8 dtl_event_mask = 0x7;
 
 
 /*
- * Size of per-cpu log buffers. Default is just under 16 pages worth.
+ * Size of per-cpu log buffers. Firmware requires that the buffer does
+ * not cross a 4k boundary.
  */
-static int dtl_buf_entries = (16 * 85);
-
+static int dtl_buf_entries = N_DISPATCH_LOG;
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 struct dtl_ring {
@@ -151,7 +151,7 @@ static int dtl_start(struct dtl *dtl)
 
 	/* Register our dtl buffer with the hypervisor. The HV expects the
 	 * buffer size to be passed in the second word of the buffer */
-	((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry);
+	((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
 
 	hwcpu = get_hard_smp_processor_id(dtl->cpu);
 	addr = __pa(dtl->buf);
@@ -196,13 +196,15 @@ static int dtl_enable(struct dtl *dtl)
 	long int rc;
 	struct dtl_entry *buf = NULL;
 
+	if (!dtl_cache)
+		return -ENOMEM;
+
 	/* only allow one reader */
 	if (dtl->buf)
 		return -EBUSY;
 
 	n_entries = dtl_buf_entries;
-	buf = kmalloc_node(n_entries * sizeof(struct dtl_entry),
-			GFP_KERNEL, cpu_to_node(dtl->cpu));
+	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
 	if (!buf) {
 		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
 			__func__, dtl->cpu);
@@ -223,7 +225,7 @@ static int dtl_enable(struct dtl *dtl)
 	spin_unlock(&dtl->lock);
 
 	if (rc)
-		kfree(buf);
+		kmem_cache_free(dtl_cache, buf);
 	return rc;
 }
 
@@ -231,7 +233,7 @@ static void dtl_disable(struct dtl *dtl)
 {
 	spin_lock(&dtl->lock);
 	dtl_stop(dtl);
-	kfree(dtl->buf);
+	kmem_cache_free(dtl_cache, dtl->buf);
 	dtl->buf = NULL;
 	dtl->buf_entries = 0;
 	spin_unlock(&dtl->lock);
@@ -365,7 +367,7 @@ static int dtl_init(void)
 
 	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
 				dtl_dir, &dtl_event_mask);
-	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600,
+	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
 				dtl_dir, &dtl_buf_entries);
 
 	if (!event_mask_file || !buf_entries_file) {
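Why the allocator changed: kmalloc_node() makes no boundary guarantee, while a slab cache whose alignment equals its object size hands out naturally aligned buffers, which is exactly the "must not cross a 4k boundary" rule the new comment cites. A minimal sketch of that contract (the real cache is created in the setup.c hunk later in this diff; dtl_cache_sketch is a stand-in name):

	static struct kmem_cache *dtl_cache;	/* populated at boot by setup.c */

	static int __init dtl_cache_sketch(void)
	{
		/* align == size: every object is naturally aligned, so a
		 * DISPATCH_LOG_BYTES (4k) buffer can never straddle a 4k
		 * boundary, satisfying the firmware requirement. */
		dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
					      DISPATCH_LOG_BYTES, 0, NULL);
		return dtl_cache ? 0 : -ENOMEM;
	}

Buffers returned by kmem_cache_alloc_node() from such a cache then satisfy the alignment rule without any manual rounding in dtl_enable().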
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 89649173d3a3..46b55cf563e3 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -93,6 +93,7 @@ static int ibm_slot_error_detail;
 static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
 static int ibm_configure_bridge;
+static int ibm_configure_pe;
 
 int eeh_subsystem_enabled;
 EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -261,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
 	pci_regs_buf[0] = 0;
 
 	rtas_pci_enable(pdn, EEH_THAW_MMIO);
+	rtas_configure_bridge(pdn);
+	eeh_restore_bars(pdn);
 	loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
 
 	rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
@@ -448,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
 	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
 }
 
+void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
+{
+	struct device_node *dn;
+
+	for_each_child_of_node(parent, dn) {
+		if (PCI_DN(dn)) {
+
+			struct pci_dev *dev = PCI_DN(dn)->pcidev;
+
+			if (dev && dev->driver)
+				*freset |= dev->needs_freset;
+
+			__eeh_set_pe_freset(dn, freset);
+		}
+	}
+}
+
+void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
+{
+	struct pci_dev *dev;
+	dn = find_device_pe(dn);
+
+	/* Back up one, since config addrs might be shared */
+	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
+		dn = dn->parent;
+
+	dev = PCI_DN(dn)->pcidev;
+	if (dev)
+		*freset |= dev->needs_freset;
+
+	__eeh_set_pe_freset(dn, freset);
+}
+
 /**
  * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
  * @dn device node
@@ -692,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
 	if (pdn->eeh_pe_config_addr)
 		config_addr = pdn->eeh_pe_config_addr;
 
-	rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
+	rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
 	               config_addr,
 	               BUID_HI(pdn->phb->buid),
 	               BUID_LO(pdn->phb->buid),
 	               state);
-	if (rc)
-		printk (KERN_WARNING "EEH: Unable to reset the failed slot,"
-		        " (%d) #RST=%d dn=%s\n",
-		        rc, state, pdn->node->full_name);
+
+	/* Fundamental-reset not supported on this PE, try hot-reset */
+	if (rc == -8 && state == 3) {
+		rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
+			       config_addr,
+			       BUID_HI(pdn->phb->buid),
+			       BUID_LO(pdn->phb->buid), 1);
+		if (rc)
+			printk(KERN_WARNING
+				"EEH: Unable to reset the failed slot,"
+				" #RST=%d dn=%s\n",
+				rc, pdn->node->full_name);
+	}
 }
 
 /**
@@ -736,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
 /**
  * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
  * @pdn: pci device node to be reset.
- *
- * Return 0 if success, else a non-zero value.
  */
 
 static void __rtas_set_slot_reset(struct pci_dn *pdn)
 {
-	struct pci_dev *dev = pdn->pcidev;
+	unsigned int freset = 0;
 
-	/* Determine type of EEH reset required by device,
-	 * default hot reset or fundamental reset
-	 */
-	if (dev && dev->needs_freset)
+	/* Determine type of EEH reset required for
+	 * Partitionable Endpoint, a hot-reset (1)
+	 * or a fundamental reset (3).
+	 * A fundamental reset required by any device under
+	 * Partitionable Endpoint trumps hot-reset.
+	 */
+	eeh_set_pe_freset(pdn->node, &freset);
+
+	if (freset)
 		rtas_pci_slot_reset(pdn, 3);
 	else
 		rtas_pci_slot_reset(pdn, 1);
@@ -895,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn)
 {
 	int config_addr;
 	int rc;
+	int token;
 
 	/* Use PE configuration address, if present */
 	config_addr = pdn->eeh_config_addr;
 	if (pdn->eeh_pe_config_addr)
 		config_addr = pdn->eeh_pe_config_addr;
 
-	rc = rtas_call(ibm_configure_bridge,3,1, NULL,
+	/* Use new configure-pe function, if supported */
+	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
+		token = ibm_configure_pe;
+	else
+		token = ibm_configure_bridge;
+
+	rc = rtas_call(token, 3, 1, NULL,
 	               config_addr,
 	               BUID_HI(pdn->phb->buid),
 	               BUID_LO(pdn->phb->buid));
@@ -1077,6 +1132,7 @@ void __init eeh_init(void)
 	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
 	ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
 	ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
+	ibm_configure_pe = rtas_token("ibm,configure-pe");
 
 	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
 		return;
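For context, the new eeh_set_pe_freset() walk aggregates a per-device flag that drivers themselves set. A hedged sketch of the driver side (foo_probe and the device are hypothetical; needs_freset is the existing struct pci_dev bit the walk reads):

	/* A device that only recovers via a fundamental (PERST-style)
	 * reset can say so at probe time. eeh_set_pe_freset() ORs this
	 * bit across every device in the Partitionable Endpoint, so one
	 * such device upgrades the reset type for the whole PE. */
	static int foo_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
	{
		pdev->needs_freset = 1;
		return 0;
	}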
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index b8d70f5d9aa9..1b6cb10589e0 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
 	struct pci_bus *frozen_bus;
 	int rc = 0;
 	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
-	const char *location, *pci_str, *drv_str;
+	const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
 
 	frozen_dn = find_device_pe(event->dn);
 	if (!frozen_dn) {
@@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
 	frozen_pdn = PCI_DN(frozen_dn);
 	frozen_pdn->eeh_freeze_count++;
 
-	if (frozen_pdn->pcidev) {
-		pci_str = pci_name (frozen_pdn->pcidev);
-		drv_str = pcid_name (frozen_pdn->pcidev);
-	} else {
-		pci_str = eeh_pci_name(event->dev);
-		drv_str = pcid_name (event->dev);
-	}
+	pci_str = eeh_pci_name(event->dev);
+	drv_str = pcid_name(event->dev);
 
 	if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
 		goto excess_failures;
@@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
 	printk(KERN_WARNING
 	   "EEH: This PCI device has failed %d times in the last hour:\n",
 		frozen_pdn->eeh_freeze_count);
+
+	if (frozen_pdn->pcidev) {
+		bus_pci_str = pci_name(frozen_pdn->pcidev);
+		bus_drv_str = pcid_name(frozen_pdn->pcidev);
+		printk(KERN_WARNING
+			"EEH: Bus location=%s driver=%s pci addr=%s\n",
+			location, bus_drv_str, bus_pci_str);
+	}
+
 	printk(KERN_WARNING
-		"EEH: location=%s driver=%s pci addr=%s\n",
+		"EEH: Device location=%s driver=%s pci addr=%s\n",
 		location, drv_str, pci_str);
 
 	/* Walk the various device drivers attached to this slot through
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ef8c45489e20..46f13a3c5d09 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <asm/system.h>
@@ -28,7 +29,7 @@
 #include <asm/machdep.h>
 #include <asm/vdso_datapage.h>
 #include <asm/pSeries_reconfig.h>
-#include "xics.h"
+#include <asm/xics.h>
 #include "plpar_wrappers.h"
 #include "offline_states.h"
 
@@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np)
 	}
 
 	for_each_cpu(cpu, tmp) {
-		BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
+		BUG_ON(cpu_present(cpu));
 		set_cpu_present(cpu, true);
 		set_hard_smp_processor_id(cpu, *intserv++);
 	}
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
new file mode 100644
index 000000000000..c829e6067d54
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/irq.h>
+#include <asm/io_event_irq.h>
+
+#include "pseries.h"
+
+/*
+ * IO event interrupt is a mechanism provided by RTAS to return
+ * information about hardware error and non-error events. Device
+ * drivers can register their event handlers to receive events.
+ * Device drivers are expected to use atomic_notifier_chain_register()
+ * and atomic_notifier_chain_unregister() to register and unregister
+ * their event handlers. Since multiple IO event types and scopes
+ * share an IO event interrupt, the event handlers are called one
+ * by one until the IO event is claimed by one of the handlers.
+ * The event handlers are expected to return NOTIFY_OK if the
+ * event is handled by the event handler or NOTIFY_DONE if the
+ * event does not belong to the handler.
+ *
+ * Usage:
+ *
+ * Notifier function:
+ * #include <asm/io_event_irq.h>
+ * int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
+ *	p = (struct pseries_io_event_sect_data *) data;
+ *	if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
+ *		:
+ *		:
+ *	return NOTIFY_OK;
+ * }
+ * struct notifier_block event_nb = {
+ *	.notifier_call = event_handler,
+ * }
+ *
+ * Registration:
+ * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
+ *
+ * Unregistration:
+ * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
+ */
+
+ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
+EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
+
+static int ioei_check_exception_token;
+
+/* pSeries event log format */
+
+/* Two bytes ASCII section IDs */
+#define PSERIES_ELOG_SECT_ID_PRIV_HDR		(('P' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_HDR		(('U' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC	(('P' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_EXTENDED_UH	(('E' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FAILING_MTMS	(('M' << 8) | 'T')
+#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC	(('S' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR	(('D' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FW_ERROR		(('S' << 8) | 'W')
+#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID	(('L' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID	(('L' << 8) | 'R')
+#define PSERIES_ELOG_SECT_ID_HMC_ID		(('H' << 8) | 'M')
+#define PSERIES_ELOG_SECT_ID_EPOW		(('E' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_IO_EVENT		(('I' << 8) | 'E')
+#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO	(('M' << 8) | 'I')
+#define PSERIES_ELOG_SECT_ID_CALL_HOME		(('C' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_DEF		(('U' << 8) | 'D')
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_elog_section {
+	uint16_t id;			/* 0x00 2-byte ASCII section ID	*/
+	uint16_t length;		/* 0x02 Section length in bytes	*/
+	uint8_t version;		/* 0x04 Section version		*/
+	uint8_t subtype;		/* 0x05 Section subtype		*/
+	uint16_t creator_component;	/* 0x06 Creator component ID	*/
+	uint8_t data[];			/* 0x08 Start of section data	*/
+};
+
+static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
+
+/**
+ * Find data portion of a specific section in RTAS extended event log.
+ * @elog: RTAS error/event log.
+ * @sect_id: section ID.
+ *
+ * Return:
+ *	pointer to the section data of the specified section
+ *	NULL if not found
+ */
+static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
+						       uint16_t sect_id)
+{
+	struct rtas_ext_event_log_v6 *xelog =
+		(struct rtas_ext_event_log_v6 *) elog->buffer;
+	struct pseries_elog_section *sect;
+	unsigned char *p, *log_end;
+
+	/* Check that we understand the format */
+	if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
+	    xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
+	    xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
+		return NULL;
+
+	log_end = elog->buffer + elog->extended_log_length;
+	p = xelog->vendor_log;
+	while (p < log_end) {
+		sect = (struct pseries_elog_section *)p;
+		if (sect->id == sect_id)
+			return sect;
+		p += sect->length;
+	}
+	return NULL;
+}
+
+/**
+ * Find the data portion of an IO Event section from event log.
+ * @elog: RTAS error/event log.
+ *
+ * Return:
+ *	pointer to a valid IO event section data. NULL if not found.
+ */
+static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
+{
+	struct pseries_elog_section *sect;
+
+	/* We should only ever get called for io-event interrupts, but if
+	 * we do get called for another type then something went wrong so
+	 * make some noise about it.
+	 * RTAS_TYPE_IO only exists in extended event log version 6 or later.
+	 * No need to check event log version.
+	 */
+	if (unlikely(elog->type != RTAS_TYPE_IO)) {
+		printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
+			    elog->type);
+		return NULL;
+	}
+
+	sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
+	if (unlikely(!sect)) {
+		printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
+			    "log does not contain an IO Event section. "
+			    "Could be a bug in system firmware!\n");
+		return NULL;
+	}
+	return (struct pseries_io_event *) &sect->data;
+}
+
+/*
+ * PAPR:
+ * - check-exception returns the first found error or event and clear that
+ *   error or event so it is reported once.
+ * - Each interrupt returns one event. If a platform chooses to report
+ *   multiple events through a single interrupt, it must ensure that the
+ *   interrupt remains asserted until check-exception has been used to
+ *   process all outstanding events for that interrupt.
+ *
+ * Implementation notes:
+ * - Events must be processed in the order they are returned. Hence,
+ *   sequential in nature.
+ * - The owner of an event is determined by combinations of scope,
+ *   event type, and sub-type. There is no easy way to pre-sort clients
+ *   by scope or event type alone. For example, Torrent ISR route change
+ *   event is reported with scope 0x00 (Not Applicable) rather than
+ *   0x3B (Torrent-hub). It is better to let the clients identify
+ *   who owns the event.
+ */
+
+static irqreturn_t ioei_interrupt(int irq, void *dev_id)
+{
+	struct pseries_io_event *event;
+	int rtas_rc;
+
+	for (;;) {
+		rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
+				    RTAS_VECTOR_EXTERNAL_INTERRUPT,
+				    virq_to_hw(irq),
+				    RTAS_IO_EVENTS, 1 /* Time Critical */,
+				    __pa(ioei_rtas_buf),
+				    RTAS_DATA_BUF_SIZE);
+		if (rtas_rc != 0)
+			break;
+
+		event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
+		if (!event)
+			continue;
+
+		atomic_notifier_call_chain(&pseries_ioei_notifier_list,
+					   0, event);
+	}
+	return IRQ_HANDLED;
+}
+
+static int __init ioei_init(void)
+{
+	struct device_node *np;
+
+	ioei_check_exception_token = rtas_token("check-exception");
+	if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) {
+		pr_warning("IO Event IRQ not supported on this system!\n");
+		return -ENODEV;
+	}
+	np = of_find_node_by_path("/event-sources/ibm,io-events");
+	if (np) {
+		request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
+		of_node_put(np);
+	} else {
+		pr_err("io_event_irq: No ibm,io-events on system! "
+		       "IO Event interrupt disabled.\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+machine_subsys_initcall(pseries, ioei_init);
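The usage comment in the new file is pseudocode; a self-contained sketch of a client module follows. The MY_SCOPE/MY_TYPE values and the handler body are hypothetical, and the scope/event_type field names are assumed to match what asm/io_event_irq.h declares (the file's own comment reads them as p->scope and p->event_type); the notifier list and registration calls are exactly the API this patch exports.

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <asm/io_event_irq.h>

	#define MY_SCOPE	0x3B	/* hypothetical scope value */
	#define MY_TYPE		0x01	/* hypothetical event type */

	static int my_handler(struct notifier_block *nb,
			      unsigned long val, void *data)
	{
		struct pseries_io_event *event = data;

		if (event->scope != MY_SCOPE || event->event_type != MY_TYPE)
			return NOTIFY_DONE;	/* not ours; try next handler */

		/* ... process the event ... */
		return NOTIFY_OK;		/* claim the event */
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_handler,
	};

	static int __init my_init(void)
	{
		return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
						      &my_nb);
	}

	static void __exit my_exit(void)
	{
		atomic_notifier_chain_unregister(&pseries_ioei_notifier_list,
						 &my_nb);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");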
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6d5412a18b26..01faab9456ca 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -659,15 +659,18 @@ static void remove_ddw(struct device_node *np)
 {
 	struct dynamic_dma_window_prop *dwp;
 	struct property *win64;
-	const u32 *ddr_avail;
+	const u32 *ddw_avail;
 	u64 liobn;
 	int len, ret;
 
-	ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
 	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
-	if (!win64 || !ddr_avail || len < 3 * sizeof(u32))
+	if (!win64)
 		return;
 
+	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
+		goto delprop;
+
 	dwp = win64->value;
 	liobn = (u64)be32_to_cpu(dwp->liobn);
 
@@ -681,28 +684,29 @@
 	pr_debug("%s successfully cleared tces in window.\n",
 		 np->full_name);
 
-	ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn);
+	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
 	if (ret)
 		pr_warning("%s: failed to remove direct window: rtas returned "
 			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
-			np->full_name, ret, ddr_avail[2], liobn);
+			np->full_name, ret, ddw_avail[2], liobn);
 	else
 		pr_debug("%s: successfully removed direct window: rtas returned "
 			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
-			np->full_name, ret, ddr_avail[2], liobn);
-}
+			np->full_name, ret, ddw_avail[2], liobn);
 
+delprop:
+	ret = prom_remove_property(np, win64);
+	if (ret)
+		pr_warning("%s: failed to remove direct window property: %d\n",
+			np->full_name, ret);
+}
 
-static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn)
+static u64 find_existing_ddw(struct device_node *pdn)
 {
-	struct device_node *dn;
-	struct pci_dn *pcidn;
 	struct direct_window *window;
 	const struct dynamic_dma_window_prop *direct64;
 	u64 dma_addr = 0;
 
-	dn = pci_device_to_OF_node(dev);
-	pcidn = PCI_DN(dn);
 	spin_lock(&direct_window_list_lock);
 	/* check if we already created a window and dupe that config if so */
 	list_for_each_entry(window, &direct_window_list, list) {
@@ -717,36 +721,40 @@ static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *
 	return dma_addr;
 }
 
-static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn)
+static int find_existing_ddw_windows(void)
 {
-	struct device_node *dn;
-	struct pci_dn *pcidn;
 	int len;
+	struct device_node *pdn;
 	struct direct_window *window;
 	const struct dynamic_dma_window_prop *direct64;
-	u64 dma_addr = 0;
 
-	dn = pci_device_to_OF_node(dev);
-	pcidn = PCI_DN(dn);
-	direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
-	if (direct64) {
+	if (!firmware_has_feature(FW_FEATURE_LPAR))
+		return 0;
+
+	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
+		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
+		if (!direct64)
+			continue;
+
 		window = kzalloc(sizeof(*window), GFP_KERNEL);
-		if (!window) {
+		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+			kfree(window);
 			remove_ddw(pdn);
-		} else {
-			window->device = pdn;
-			window->prop = direct64;
-			spin_lock(&direct_window_list_lock);
-			list_add(&window->list, &direct_window_list);
-			spin_unlock(&direct_window_list_lock);
-			dma_addr = direct64->dma_base;
+			continue;
 		}
+
+		window->device = pdn;
+		window->prop = direct64;
+		spin_lock(&direct_window_list_lock);
+		list_add(&window->list, &direct_window_list);
+		spin_unlock(&direct_window_list_lock);
 	}
 
-	return dma_addr;
+	return 0;
 }
+machine_arch_initcall(pseries, find_existing_ddw_windows);
 
-static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
 		struct ddw_query_response *query)
 {
 	struct device_node *dn;
@@ -767,15 +775,15 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
 	if (pcidn->eeh_pe_config_addr)
 		cfg_addr = pcidn->eeh_pe_config_addr;
 	buid = pcidn->phb->buid;
-	ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query,
+	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
 		  cfg_addr, BUID_HI(buid), BUID_LO(buid));
 	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
-		" returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid),
+		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
 		BUID_LO(buid), ret);
 	return ret;
 }
 
-static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
 		struct ddw_create_response *create, int page_shift,
 		int window_shift)
 {
@@ -800,12 +808,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
 
 	do {
 		/* extra outputs are LIOBN and dma-addr (hi, lo) */
-		ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr,
+		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
 				BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
 	} while (rtas_busy_delay(ret));
 	dev_info(&dev->dev,
 		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
-		"(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1],
+		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
 		 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
 		 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
 
@@ -831,18 +839,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	int page_shift;
 	u64 dma_addr, max_addr;
 	struct device_node *dn;
-	const u32 *uninitialized_var(ddr_avail);
+	const u32 *uninitialized_var(ddw_avail);
 	struct direct_window *window;
-	struct property *uninitialized_var(win64);
+	struct property *win64;
 	struct dynamic_dma_window_prop *ddwprop;
 
 	mutex_lock(&direct_window_init_mutex);
 
-	dma_addr = dupe_ddw_if_already_created(dev, pdn);
-	if (dma_addr != 0)
-		goto out_unlock;
-
-	dma_addr = dupe_ddw_if_kexec(dev, pdn);
+	dma_addr = find_existing_ddw(pdn);
 	if (dma_addr != 0)
 		goto out_unlock;
 
@@ -854,8 +858,8 @@
 	 * for the given node in that order.
 	 * the property is actually in the parent, not the PE
 	 */
-	ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
-	if (!ddr_avail || len < 3 * sizeof(u32))
+	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
+	if (!ddw_avail || len < 3 * sizeof(u32))
 		goto out_unlock;
 
 	/*
@@ -865,7 +869,7 @@
 	 * of page sizes: supported and supported for migrate-dma.
 	 */
 	dn = pci_device_to_OF_node(dev);
-	ret = query_ddw(dev, ddr_avail, &query);
+	ret = query_ddw(dev, ddw_avail, &query);
 	if (ret != 0)
 		goto out_unlock;
 
@@ -907,13 +911,14 @@
 	}
 	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
 	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
+	win64->length = sizeof(*ddwprop);
 	if (!win64->name || !win64->value) {
 		dev_info(&dev->dev,
 			"couldn't allocate property name and value\n");
 		goto out_free_prop;
 	}
 
-	ret = create_ddw(dev, ddr_avail, &create, page_shift, len);
+	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
 	if (ret != 0)
 		goto out_free_prop;
 
@@ -1021,13 +1026,16 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
 	const void *dma_window = NULL;
 	u64 dma_offset;
 
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+	if (!dev->dma_mask)
 		return -EIO;
 
+	if (!dev_is_pci(dev))
+		goto check_mask;
+
+	pdev = to_pci_dev(dev);
+
 	/* only attempt to use a new window if 64-bit DMA is requested */
 	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
-		pdev = to_pci_dev(dev);
-
 		dn = pci_device_to_OF_node(pdev);
 		dev_dbg(dev, "node is %s\n", dn->full_name);
 
@@ -1054,12 +1062,17 @@
 		}
 	}
 
-	/* fall-through to iommu ops */
-	if (!ddw_enabled) {
-		dev_info(dev, "Using 32-bit DMA via iommu\n");
+	/* fall back on iommu ops, restore table pointer with ops */
+	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
+		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
+		pci_dma_dev_setup_pSeriesLP(pdev);
 	}
 
+check_mask:
+	if (!dma_supported(dev, dma_mask))
+		return -EIO;
+
 	*dev->dma_mask = dma_mask;
 	return 0;
 }
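The ddr_avail to ddw_avail rename reflects what the property actually holds. A sketch of the layout the code above assumes (the call order is grounded in the diff itself: index 0 feeds the query path, 1 the create path, 2 the remove path, and the log strings name the RTAS calls):

	static void ddw_tokens_sketch(struct device_node *pdn)
	{
		int len;
		const u32 *ddw_avail =
			of_get_property(pdn, "ibm,ddw-applicable", &len);

		if (!ddw_avail || len < 3 * sizeof(u32))
			return;		/* dynamic DMA windows not applicable */

		/* three RTAS tokens, in call order:
		 * ddw_avail[0] - ibm,query-pe-dma-window
		 * ddw_avail[1] - ibm,create-pe-dma-window
		 * ddw_avail[2] - ibm,remove-pe-dma-window */
	}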
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 77d38a5e2ff9..54cf3a4aa16b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -7,15 +7,18 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+
 #include <asm/machdep.h>
 #include <asm/page.h>
 #include <asm/firmware.h>
 #include <asm/kexec.h>
 #include <asm/mpic.h>
+#include <asm/xics.h>
 #include <asm/smp.h>
 
 #include "pseries.h"
-#include "xics.h"
 #include "plpar_wrappers.h"
 
 static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index ca5d5898d320..39e6e0a7b2fa 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -329,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	/* Make pHyp happy */
 	if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
 		hpte_r &= ~_PAGE_COHERENT;
+	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
+		flags |= H_COALESCE_CAND;
 
 	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
 	if (unlikely(lpar_rc == H_PTEG_FULL)) {
@@ -573,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 	unsigned long i, pix, rc;
 	unsigned long flags = 0;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 	unsigned long param[9];
 	unsigned long va;
 	unsigned long hash, index, shift, hidx, slot;
@@ -771,3 +773,47 @@ out:
 	local_irq_restore(flags);
 }
 #endif
+
+/**
+ * h_get_mpp
+ * H_GET_MPP hcall returns info in 7 parms
+ */
+int h_get_mpp(struct hvcall_mpp_data *mpp_data)
+{
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9(H_GET_MPP, retbuf);
+
+	mpp_data->entitled_mem = retbuf[0];
+	mpp_data->mapped_mem = retbuf[1];
+
+	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+	mpp_data->pool_num = retbuf[2] & 0xffff;
+
+	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
+	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
+	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+
+	mpp_data->pool_size = retbuf[4];
+	mpp_data->loan_request = retbuf[5];
+	mpp_data->backing_mem = retbuf[6];
+
+	return rc;
+}
+EXPORT_SYMBOL(h_get_mpp);
+
+int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
+{
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
+
+	rc = plpar_hcall9(H_GET_MPP_X, retbuf);
+
+	mpp_x_data->coalesced_bytes = retbuf[0];
+	mpp_x_data->pool_coalesced_bytes = retbuf[1];
+	mpp_x_data->pool_purr_cycles = retbuf[2];
+	mpp_x_data->pool_spurr_cycles = retbuf[3];
+
+	return rc;
+}
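The unpacking in h_get_mpp() is plain shift-and-mask over the 64-bit hcall return registers; spelled out for two of them (field widths are inferred from the masks in the code itself, not from a separate spec):

	/* retbuf[2]: | unused (4 bytes) | group_num (2) | pool_num (2) | */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;	/* skip low 2 bytes */
	mpp_data->pool_num  =  retbuf[2] & 0xffff;		/* keep low 2 bytes */

	/* retbuf[3]: | mem_weight (1) | unalloc_weight (1) | entitlement (6) | */
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;	/* topmost byte */
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; /* low 6 bytes */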
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d9801117124b..4bf21207d7d3 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
 			lbuf[1]);
 }
 
-static inline long plpar_eoi(unsigned long xirr)
-{
-	return plpar_hcall_norets(H_EOI, xirr);
-}
-
-static inline long plpar_cppr(unsigned long cppr)
-{
-	return plpar_hcall_norets(H_CPPR, cppr);
-}
-
-static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
-{
-	return plpar_hcall_norets(H_IPI, servernum, mfrr);
-}
-
-static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr)
-{
-	long rc;
-	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-	rc = plpar_hcall(H_XIRR, retbuf, cppr);
-
-	*xirr_ret = retbuf[0];
-
-	return rc;
-}
-
 #endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index c55d7ad9c648..086d2ae4e06a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -122,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
 
 	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
 			   RTAS_VECTOR_EXTERNAL_INTERRUPT,
-			   irq_map[irq].hwirq,
+			   virq_to_hw(irq),
 			   RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
 			   critical, __pa(&ras_log_buf),
 			   rtas_get_error_log_max());
@@ -157,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
 
 	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
 			   RTAS_VECTOR_EXTERNAL_INTERRUPT,
-			   irq_map[irq].hwirq,
+			   virq_to_hw(irq),
 			   RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
 			   __pa(&ras_log_buf),
 			   rtas_get_error_log_max());
@@ -227,7 +227,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 	struct rtas_error_log *h, *errhdr = NULL;
 
 	if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
-		printk(KERN_ERR "FWNMI: corrupt r3\n");
+		printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
 		return NULL;
 	}
 
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 6c42cfde8415..593acceeff96 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -53,9 +53,9 @@ | |||
53 | #include <asm/irq.h> | 53 | #include <asm/irq.h> |
54 | #include <asm/time.h> | 54 | #include <asm/time.h> |
55 | #include <asm/nvram.h> | 55 | #include <asm/nvram.h> |
56 | #include "xics.h" | ||
57 | #include <asm/pmc.h> | 56 | #include <asm/pmc.h> |
58 | #include <asm/mpic.h> | 57 | #include <asm/mpic.h> |
58 | #include <asm/xics.h> | ||
59 | #include <asm/ppc-pci.h> | 59 | #include <asm/ppc-pci.h> |
60 | #include <asm/i8259.h> | 60 | #include <asm/i8259.h> |
61 | #include <asm/udbg.h> | 61 | #include <asm/udbg.h> |
@@ -205,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void) | |||
205 | mpic_assign_isu(mpic, n, isuaddr); | 205 | mpic_assign_isu(mpic, n, isuaddr); |
206 | } | 206 | } |
207 | 207 | ||
208 | /* Setup top-level get_irq */ | ||
209 | ppc_md.get_irq = mpic_get_irq; | ||
210 | |||
208 | /* All ISUs are setup, complete initialization */ | 211 | /* All ISUs are setup, complete initialization */ |
209 | mpic_init(mpic); | 212 | mpic_init(mpic); |
210 | 213 | ||
@@ -214,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void) | |||
214 | 217 | ||
215 | static void __init pseries_xics_init_IRQ(void) | 218 | static void __init pseries_xics_init_IRQ(void) |
216 | { | 219 | { |
217 | xics_init_IRQ(); | 220 | xics_init(); |
218 | pseries_setup_i8259_cascade(); | 221 | pseries_setup_i8259_cascade(); |
219 | } | 222 | } |
220 | 223 | ||
@@ -238,7 +241,6 @@ static void __init pseries_discover_pic(void) | |||
238 | if (strstr(typep, "open-pic")) { | 241 | if (strstr(typep, "open-pic")) { |
239 | pSeries_mpic_node = of_node_get(np); | 242 | pSeries_mpic_node = of_node_get(np); |
240 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | 243 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; |
241 | ppc_md.get_irq = mpic_get_irq; | ||
242 | setup_kexec_cpu_down_mpic(); | 244 | setup_kexec_cpu_down_mpic(); |
243 | smp_init_pseries_mpic(); | 245 | smp_init_pseries_mpic(); |
244 | return; | 246 | return; |
@@ -276,6 +278,8 @@ static struct notifier_block pci_dn_reconfig_nb = { | |||
276 | .notifier_call = pci_dn_reconfig_notifier, | 278 | .notifier_call = pci_dn_reconfig_notifier, |
277 | }; | 279 | }; |
278 | 280 | ||
281 | struct kmem_cache *dtl_cache; | ||
282 | |||
279 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 283 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
280 | /* | 284 | /* |
281 | * Allocate space for the dispatch trace log for all possible cpus | 285 | * Allocate space for the dispatch trace log for all possible cpus |
@@ -287,18 +291,12 @@ static int alloc_dispatch_logs(void) | |||
287 | int cpu, ret; | 291 | int cpu, ret; |
288 | struct paca_struct *pp; | 292 | struct paca_struct *pp; |
289 | struct dtl_entry *dtl; | 293 | struct dtl_entry *dtl; |
290 | struct kmem_cache *dtl_cache; | ||
291 | 294 | ||
292 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | 295 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) |
293 | return 0; | 296 | return 0; |
294 | 297 | ||
295 | dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, | 298 | if (!dtl_cache) |
296 | DISPATCH_LOG_BYTES, 0, NULL); | ||
297 | if (!dtl_cache) { | ||
298 | pr_warn("Failed to create dispatch trace log buffer cache\n"); | ||
299 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
300 | return 0; | 299 | return 0; |
301 | } | ||
302 | 300 | ||
303 | for_each_possible_cpu(cpu) { | 301 | for_each_possible_cpu(cpu) { |
304 | pp = &paca[cpu]; | 302 | pp = &paca[cpu]; |
@@ -332,10 +330,27 @@ static int alloc_dispatch_logs(void) | |||
332 | 330 | ||
333 | return 0; | 331 | return 0; |
334 | } | 332 | } |
335 | 333 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ | |
336 | early_initcall(alloc_dispatch_logs); | 334 | static inline int alloc_dispatch_logs(void) |
335 | { | ||
336 | return 0; | ||
337 | } | ||
337 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 338 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ |
338 | 339 | ||
340 | static int alloc_dispatch_log_kmem_cache(void) | ||
341 | { | ||
342 | dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, | ||
343 | DISPATCH_LOG_BYTES, 0, NULL); | ||
344 | if (!dtl_cache) { | ||
345 | pr_warn("Failed to create dispatch trace log buffer cache\n"); | ||
346 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | return alloc_dispatch_logs(); | ||
351 | } | ||
352 | early_initcall(alloc_dispatch_log_kmem_cache); | ||
353 | |||
339 | static void __init pSeries_setup_arch(void) | 354 | static void __init pSeries_setup_arch(void) |
340 | { | 355 | { |
341 | /* Discover PIC type and setup ppc_md accordingly */ | 356 | /* Discover PIC type and setup ppc_md accordingly */ |
@@ -403,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr) | |||
403 | #define CMO_CHARACTERISTICS_TOKEN 44 | 418 | #define CMO_CHARACTERISTICS_TOKEN 44 |
404 | #define CMO_MAXLENGTH 1026 | 419 | #define CMO_MAXLENGTH 1026 |
405 | 420 | ||
421 | void pSeries_coalesce_init(void) | ||
422 | { | ||
423 | struct hvcall_mpp_x_data mpp_x_data; | ||
424 | |||
425 | if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data)) | ||
426 | powerpc_firmware_features |= FW_FEATURE_XCMO; | ||
427 | else | ||
428 | powerpc_firmware_features &= ~FW_FEATURE_XCMO; | ||
429 | } | ||
430 | |||
406 | /** | 431 | /** |
407 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, | 432 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, |
408 | * handle that here. (Stolen from parse_system_parameter_string) | 433 | * handle that here. (Stolen from parse_system_parameter_string) |
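pSeries_coalesce_init() probes for page-coalescing support by issuing H_GET_MPP_X through the h_get_mpp_x() wrapper: a successful call sets FW_FEATURE_XCMO, anything else clears it. Later code can then test the feature bit instead of re-probing the hypervisor; roughly, as a hypothetical consumer:

	if (firmware_has_feature(FW_FEATURE_XCMO))
		pr_info("extended CMO (page coalescing) available\n");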
@@ -472,6 +497,7 @@ void pSeries_cmo_feature_init(void) | |||
472 | pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, | 497 | pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, |
473 | CMO_SecPSP); | 498 | CMO_SecPSP); |
474 | powerpc_firmware_features |= FW_FEATURE_CMO; | 499 | powerpc_firmware_features |= FW_FEATURE_CMO; |
500 | pSeries_coalesce_init(); | ||
475 | } else | 501 | } else |
476 | pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, | 502 | pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, |
477 | CMO_SecPSP); | 503 | CMO_SecPSP); |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index a509c5292a67..fbffd7e47ab8 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -44,10 +44,11 @@ | |||
44 | #include <asm/mpic.h> | 44 | #include <asm/mpic.h> |
45 | #include <asm/vdso_datapage.h> | 45 | #include <asm/vdso_datapage.h> |
46 | #include <asm/cputhreads.h> | 46 | #include <asm/cputhreads.h> |
47 | #include <asm/mpic.h> | ||
48 | #include <asm/xics.h> | ||
47 | 49 | ||
48 | #include "plpar_wrappers.h" | 50 | #include "plpar_wrappers.h" |
49 | #include "pseries.h" | 51 | #include "pseries.h" |
50 | #include "xics.h" | ||
51 | #include "offline_states.h" | 52 | #include "offline_states.h" |
52 | 53 | ||
53 | 54 | ||
@@ -136,7 +137,6 @@ out: | |||
136 | return 1; | 137 | return 1; |
137 | } | 138 | } |
138 | 139 | ||
139 | #ifdef CONFIG_XICS | ||
140 | static void __devinit smp_xics_setup_cpu(int cpu) | 140 | static void __devinit smp_xics_setup_cpu(int cpu) |
141 | { | 141 | { |
142 | if (cpu != boot_cpuid) | 142 | if (cpu != boot_cpuid) |
@@ -151,14 +151,13 @@ static void __devinit smp_xics_setup_cpu(int cpu) | |||
151 | set_default_offline_state(cpu); | 151 | set_default_offline_state(cpu); |
152 | #endif | 152 | #endif |
153 | } | 153 | } |
154 | #endif /* CONFIG_XICS */ | ||
155 | 154 | ||
156 | static void __devinit smp_pSeries_kick_cpu(int nr) | 155 | static int __devinit smp_pSeries_kick_cpu(int nr) |
157 | { | 156 | { |
158 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 157 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
159 | 158 | ||
160 | if (!smp_startup_cpu(nr)) | 159 | if (!smp_startup_cpu(nr)) |
161 | return; | 160 | return -ENOENT; |
162 | 161 | ||
163 | /* | 162 | /* |
164 | * The processor is currently spinning, waiting for the | 163 | * The processor is currently spinning, waiting for the |
@@ -180,6 +179,8 @@ static void __devinit smp_pSeries_kick_cpu(int nr) | |||
180 | "Ret= %ld\n", nr, rc); | 179 | "Ret= %ld\n", nr, rc); |
181 | } | 180 | } |
182 | #endif | 181 | #endif |
182 | |||
183 | return 0; | ||
183 | } | 184 | } |
184 | 185 | ||
185 | static int smp_pSeries_cpu_bootable(unsigned int nr) | 186 | static int smp_pSeries_cpu_bootable(unsigned int nr) |
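Changing kick_cpu from void to int lets a startup failure propagate instead of being silently dropped: a failed smp_startup_cpu() now yields -ENOENT rather than an eventual timeout in generic code. A sketch of what a caller gains, with illustrative error handling:

	int rc = smp_ops->kick_cpu(cpu);

	if (rc) {
		pr_err("CPU %d failed to start: %d\n", cpu, rc);
		return rc;	/* fail fast instead of spin-waiting */
	}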
@@ -197,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) | |||
197 | 198 | ||
198 | return 1; | 199 | return 1; |
199 | } | 200 | } |
200 | #ifdef CONFIG_MPIC | 201 | |
201 | static struct smp_ops_t pSeries_mpic_smp_ops = { | 202 | static struct smp_ops_t pSeries_mpic_smp_ops = { |
202 | .message_pass = smp_mpic_message_pass, | 203 | .message_pass = smp_mpic_message_pass, |
203 | .probe = smp_mpic_probe, | 204 | .probe = smp_mpic_probe, |
204 | .kick_cpu = smp_pSeries_kick_cpu, | 205 | .kick_cpu = smp_pSeries_kick_cpu, |
205 | .setup_cpu = smp_mpic_setup_cpu, | 206 | .setup_cpu = smp_mpic_setup_cpu, |
206 | }; | 207 | }; |
207 | #endif | 208 | |
208 | #ifdef CONFIG_XICS | ||
209 | static struct smp_ops_t pSeries_xics_smp_ops = { | 209 | static struct smp_ops_t pSeries_xics_smp_ops = { |
210 | .message_pass = smp_xics_message_pass, | 210 | .message_pass = smp_muxed_ipi_message_pass, |
211 | .probe = smp_xics_probe, | 211 | .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ |
212 | .probe = xics_smp_probe, | ||
212 | .kick_cpu = smp_pSeries_kick_cpu, | 213 | .kick_cpu = smp_pSeries_kick_cpu, |
213 | .setup_cpu = smp_xics_setup_cpu, | 214 | .setup_cpu = smp_xics_setup_cpu, |
214 | .cpu_bootable = smp_pSeries_cpu_bootable, | 215 | .cpu_bootable = smp_pSeries_cpu_bootable, |
215 | }; | 216 | }; |
216 | #endif | ||
217 | 217 | ||
218 | /* This is called very early */ | 218 | /* This is called very early */ |
219 | static void __init smp_init_pseries(void) | 219 | static void __init smp_init_pseries(void) |
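The XICS ops also switch from the driver-private smp_xics_message_pass() (deleted below in xics.c) to the generic smp_muxed_ipi_message_pass(), with the low-level cause_ipi hook filled in by xics_smp_probe() once it knows which backend is in use. The multiplexing idea, sketched from the deleted per-cpu bitmask code with illustrative names:

	static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_message);
	static void (*cause_ipi)(int cpu);	/* filled at probe time */

	static void muxed_ipi_message_pass(int cpu, int msg)
	{
		/* record which message(s) this IPI carries... */
		set_bit(msg, &per_cpu(ipi_message, cpu));
		/* ...make the bit visible before poking the hardware... */
		mb();
		/* ...then fire the single XICS IPI */
		cause_ipi(cpu);
	}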
@@ -245,14 +245,12 @@ static void __init smp_init_pseries(void) | |||
245 | pr_debug(" <- smp_init_pSeries()\n"); | 245 | pr_debug(" <- smp_init_pSeries()\n"); |
246 | } | 246 | } |
247 | 247 | ||
248 | #ifdef CONFIG_MPIC | ||
249 | void __init smp_init_pseries_mpic(void) | 248 | void __init smp_init_pseries_mpic(void) |
250 | { | 249 | { |
251 | smp_ops = &pSeries_mpic_smp_ops; | 250 | smp_ops = &pSeries_mpic_smp_ops; |
252 | 251 | ||
253 | smp_init_pseries(); | 252 | smp_init_pseries(); |
254 | } | 253 | } |
255 | #endif | ||
256 | 254 | ||
257 | void __init smp_init_pseries_xics(void) | 255 | void __init smp_init_pseries_xics(void) |
258 | { | 256 | { |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c deleted file mode 100644 index d6901334d66e..000000000000 --- a/arch/powerpc/platforms/pseries/xics.c +++ /dev/null | |||
@@ -1,949 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.c | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/radix-tree.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/msi.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/percpu.h> | ||
24 | |||
25 | #include <asm/firmware.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/smp.h> | ||
29 | #include <asm/rtas.h> | ||
30 | #include <asm/hvcall.h> | ||
31 | #include <asm/machdep.h> | ||
32 | |||
33 | #include "xics.h" | ||
34 | #include "plpar_wrappers.h" | ||
35 | |||
36 | static struct irq_host *xics_host; | ||
37 | |||
38 | #define XICS_IPI 2 | ||
39 | #define XICS_IRQ_SPURIOUS 0 | ||
40 | |||
41 | /* Want a priority other than 0. Various HW issues require this. */ | ||
42 | #define DEFAULT_PRIORITY 5 | ||
43 | |||
44 | /* | ||
45 | * Mark IPIs as higher priority so we can take them inside interrupts that | ||
46 | * aren't marked IRQF_DISABLED | ||
47 | */ | ||
48 | #define IPI_PRIORITY 4 | ||
49 | |||
50 | /* The least favored priority */ | ||
51 | #define LOWEST_PRIORITY 0xFF | ||
52 | |||
53 | /* The number of priorities defined above */ | ||
54 | #define MAX_NUM_PRIORITIES 3 | ||
55 | |||
56 | static unsigned int default_server = 0xFF; | ||
57 | static unsigned int default_distrib_server = 0; | ||
58 | static unsigned int interrupt_server_size = 8; | ||
59 | |||
60 | /* RTAS service tokens */ | ||
61 | static int ibm_get_xive; | ||
62 | static int ibm_set_xive; | ||
63 | static int ibm_int_on; | ||
64 | static int ibm_int_off; | ||
65 | |||
66 | struct xics_cppr { | ||
67 | unsigned char stack[MAX_NUM_PRIORITIES]; | ||
68 | int index; | ||
69 | }; | ||
70 | |||
71 | static DEFINE_PER_CPU(struct xics_cppr, xics_cppr); | ||
72 | |||
73 | /* Direct hardware low level accessors */ | ||
74 | |||
75 | /* The part of the interrupt presentation layer that we care about */ | ||
76 | struct xics_ipl { | ||
77 | union { | ||
78 | u32 word; | ||
79 | u8 bytes[4]; | ||
80 | } xirr_poll; | ||
81 | union { | ||
82 | u32 word; | ||
83 | u8 bytes[4]; | ||
84 | } xirr; | ||
85 | u32 dummy; | ||
86 | union { | ||
87 | u32 word; | ||
88 | u8 bytes[4]; | ||
89 | } qirr; | ||
90 | }; | ||
91 | |||
92 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | ||
93 | |||
94 | static inline unsigned int direct_xirr_info_get(void) | ||
95 | { | ||
96 | int cpu = smp_processor_id(); | ||
97 | |||
98 | return in_be32(&xics_per_cpu[cpu]->xirr.word); | ||
99 | } | ||
100 | |||
101 | static inline void direct_xirr_info_set(unsigned int value) | ||
102 | { | ||
103 | int cpu = smp_processor_id(); | ||
104 | |||
105 | out_be32(&xics_per_cpu[cpu]->xirr.word, value); | ||
106 | } | ||
107 | |||
108 | static inline void direct_cppr_info(u8 value) | ||
109 | { | ||
110 | int cpu = smp_processor_id(); | ||
111 | |||
112 | out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value); | ||
113 | } | ||
114 | |||
115 | static inline void direct_qirr_info(int n_cpu, u8 value) | ||
116 | { | ||
117 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); | ||
118 | } | ||
119 | |||
120 | |||
121 | /* LPAR low level accessors */ | ||
122 | |||
123 | static inline unsigned int lpar_xirr_info_get(unsigned char cppr) | ||
124 | { | ||
125 | unsigned long lpar_rc; | ||
126 | unsigned long return_value; | ||
127 | |||
128 | lpar_rc = plpar_xirr(&return_value, cppr); | ||
129 | if (lpar_rc != H_SUCCESS) | ||
130 | panic(" bad return code xirr - rc = %lx\n", lpar_rc); | ||
131 | return (unsigned int)return_value; | ||
132 | } | ||
133 | |||
134 | static inline void lpar_xirr_info_set(unsigned int value) | ||
135 | { | ||
136 | unsigned long lpar_rc; | ||
137 | |||
138 | lpar_rc = plpar_eoi(value); | ||
139 | if (lpar_rc != H_SUCCESS) | ||
140 | panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc, | ||
141 | value); | ||
142 | } | ||
143 | |||
144 | static inline void lpar_cppr_info(u8 value) | ||
145 | { | ||
146 | unsigned long lpar_rc; | ||
147 | |||
148 | lpar_rc = plpar_cppr(value); | ||
149 | if (lpar_rc != H_SUCCESS) | ||
150 | panic("bad return code cppr - rc = %lx\n", lpar_rc); | ||
151 | } | ||
152 | |||
153 | static inline void lpar_qirr_info(int n_cpu, u8 value) | ||
154 | { | ||
155 | unsigned long lpar_rc; | ||
156 | |||
157 | lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); | ||
158 | if (lpar_rc != H_SUCCESS) | ||
159 | panic("bad return code qirr - rc = %lx\n", lpar_rc); | ||
160 | } | ||
161 | |||
162 | |||
163 | /* Interface to generic irq subsystem */ | ||
164 | |||
165 | #ifdef CONFIG_SMP | ||
166 | /* | ||
167 | * For the moment we only implement delivery to all cpus or one cpu. | ||
168 | * | ||
169 | * If the requested affinity is cpu_all_mask, we set global affinity. | ||
170 | * If not we set it to the first cpu in the mask, even if multiple cpus | ||
171 | * are set. This is so things like irqbalance (which set core and package | ||
172 | * wide affinities) do the right thing. | ||
173 | */ | ||
174 | static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
175 | unsigned int strict_check) | ||
176 | { | ||
177 | |||
178 | if (!distribute_irqs) | ||
179 | return default_server; | ||
180 | |||
181 | if (!cpumask_subset(cpu_possible_mask, cpumask)) { | ||
182 | int server = cpumask_first_and(cpu_online_mask, cpumask); | ||
183 | |||
184 | if (server < nr_cpu_ids) | ||
185 | return get_hard_smp_processor_id(server); | ||
186 | |||
187 | if (strict_check) | ||
188 | return -1; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Workaround issue with some versions of JS20 firmware that | ||
193 | * deliver interrupts to cpus which haven't been started. This | ||
194 | * happens when using the maxcpus= boot option. | ||
195 | */ | ||
196 | if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | ||
197 | return default_distrib_server; | ||
198 | |||
199 | return default_server; | ||
200 | } | ||
201 | #else | ||
202 | #define get_irq_server(virq, cpumask, strict_check) (default_server) | ||
203 | #endif | ||
204 | |||
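Concretely (hypothetical calls, with distribute_irqs enabled): a single-cpu mask resolves to that cpu's hard id, while a full mask falls through to the global queue:

	server = get_irq_server(virq, cpumask_of(3), 0);
	/* -> get_hard_smp_processor_id(3): the mask is a strict subset
	 *    of cpu_possible_mask and cpu 3 is online */

	server = get_irq_server(virq, cpu_all_mask, 0);
	/* -> default_distrib_server (the GIQ) when all present cpus are
	 *    online, else default_server (the JS20 firmware workaround) */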
205 | static void xics_unmask_irq(struct irq_data *d) | ||
206 | { | ||
207 | unsigned int hwirq; | ||
208 | int call_status; | ||
209 | int server; | ||
210 | |||
211 | pr_devel("xics: unmask virq %d\n", d->irq); | ||
212 | |||
213 | hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
214 | pr_devel(" -> map to hwirq 0x%x\n", hwirq); | ||
215 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
216 | return; | ||
217 | |||
218 | server = get_irq_server(d->irq, d->affinity, 0); | ||
219 | |||
220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, | ||
221 | DEFAULT_PRIORITY); | ||
222 | if (call_status != 0) { | ||
223 | printk(KERN_ERR | ||
224 | "%s: ibm_set_xive irq %u server %x returned %d\n", | ||
225 | __func__, hwirq, server, call_status); | ||
226 | return; | ||
227 | } | ||
228 | |||
229 | /* Now unmask the interrupt (often a no-op) */ | ||
230 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); | ||
231 | if (call_status != 0) { | ||
232 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | ||
233 | __func__, hwirq, call_status); | ||
234 | return; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static unsigned int xics_startup(struct irq_data *d) | ||
239 | { | ||
240 | /* | ||
241 | * The generic MSI code returns with the interrupt disabled on the | ||
242 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
243 | * at that level, so we do it here by hand. | ||
244 | */ | ||
245 | if (d->msi_desc) | ||
246 | unmask_msi_irq(d); | ||
247 | |||
248 | /* unmask it */ | ||
249 | xics_unmask_irq(d); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void xics_mask_real_irq(unsigned int hwirq) | ||
254 | { | ||
255 | int call_status; | ||
256 | |||
257 | if (hwirq == XICS_IPI) | ||
258 | return; | ||
259 | |||
260 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); | ||
261 | if (call_status != 0) { | ||
262 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | ||
263 | __func__, hwirq, call_status); | ||
264 | return; | ||
265 | } | ||
266 | |||
267 | /* Have to set XIVE to 0xff to be able to remove a slot */ | ||
268 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, | ||
269 | default_server, 0xff); | ||
270 | if (call_status != 0) { | ||
271 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | ||
272 | __func__, hwirq, call_status); | ||
273 | return; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | static void xics_mask_irq(struct irq_data *d) | ||
278 | { | ||
279 | unsigned int hwirq; | ||
280 | |||
281 | pr_devel("xics: mask virq %d\n", d->irq); | ||
282 | |||
283 | hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
284 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
285 | return; | ||
286 | xics_mask_real_irq(hwirq); | ||
287 | } | ||
288 | |||
289 | static void xics_mask_unknown_vec(unsigned int vec) | ||
290 | { | ||
291 | printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); | ||
292 | xics_mask_real_irq(vec); | ||
293 | } | ||
294 | |||
295 | static inline unsigned int xics_xirr_vector(unsigned int xirr) | ||
296 | { | ||
297 | /* | ||
298 | * The top byte is the old cppr, to be restored on EOI. | ||
299 | * The remaining 24 bits are the vector. | ||
300 | */ | ||
301 | return xirr & 0x00ffffff; | ||
302 | } | ||
303 | |||
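To spell out the XIRR layout the comment describes: the top byte is the CPPR that was in effect before the interrupt, the low 24 bits identify the source, and EOI writes the pair back. In the driver the old priority comes back off the per-cpu CPPR stack (push_cppr/pop_cppr below), which tracks the same value; the round trip is:

	unsigned int xirr = direct_xirr_info_get();
	unsigned char old_cppr = xirr >> 24;	/* restored on EOI */
	unsigned int vec = xirr & 0x00ffffff;	/* the interrupt source */

	/* ... handle vec ... then EOI by recombining the two fields */
	direct_xirr_info_set((old_cppr << 24) | vec);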
304 | static void push_cppr(unsigned int vec) | ||
305 | { | ||
306 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
307 | |||
308 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | ||
309 | return; | ||
310 | |||
311 | if (vec == XICS_IPI) | ||
312 | os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; | ||
313 | else | ||
314 | os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; | ||
315 | } | ||
316 | |||
317 | static unsigned int xics_get_irq_direct(void) | ||
318 | { | ||
319 | unsigned int xirr = direct_xirr_info_get(); | ||
320 | unsigned int vec = xics_xirr_vector(xirr); | ||
321 | unsigned int irq; | ||
322 | |||
323 | if (vec == XICS_IRQ_SPURIOUS) | ||
324 | return NO_IRQ; | ||
325 | |||
326 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
327 | if (likely(irq != NO_IRQ)) { | ||
328 | push_cppr(vec); | ||
329 | return irq; | ||
330 | } | ||
331 | |||
332 | /* We don't have a linux mapping, so have RTAS mask it. */ | ||
333 | xics_mask_unknown_vec(vec); | ||
334 | |||
335 | /* We might learn about it later, so EOI it */ | ||
336 | direct_xirr_info_set(xirr); | ||
337 | return NO_IRQ; | ||
338 | } | ||
339 | |||
340 | static unsigned int xics_get_irq_lpar(void) | ||
341 | { | ||
342 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
343 | unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); | ||
344 | unsigned int vec = xics_xirr_vector(xirr); | ||
345 | unsigned int irq; | ||
346 | |||
347 | if (vec == XICS_IRQ_SPURIOUS) | ||
348 | return NO_IRQ; | ||
349 | |||
350 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
351 | if (likely(irq != NO_IRQ)) { | ||
352 | push_cppr(vec); | ||
353 | return irq; | ||
354 | } | ||
355 | |||
356 | /* We don't have a linux mapping, so have RTAS mask it. */ | ||
357 | xics_mask_unknown_vec(vec); | ||
358 | |||
359 | /* We might learn about it later, so EOI it */ | ||
360 | lpar_xirr_info_set(xirr); | ||
361 | return NO_IRQ; | ||
362 | } | ||
363 | |||
364 | static unsigned char pop_cppr(void) | ||
365 | { | ||
366 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
367 | |||
368 | if (WARN_ON(os_cppr->index < 1)) | ||
369 | return LOWEST_PRIORITY; | ||
370 | |||
371 | return os_cppr->stack[--os_cppr->index]; | ||
372 | } | ||
373 | |||
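Taken together, push_cppr()/pop_cppr() maintain a per-cpu stack of processor priorities so that nested interrupts unwind correctly: EOI writes back the priority that was in effect before the interrupt being completed. A worked trace under the priorities defined above:

	/* idle:            stack = { 0xff },        index = 0
	 * device irq:      push_cppr(vec) -> stack[1] = 5 (DEFAULT_PRIORITY)
	 * IPI preempts it: push_cppr(IPI) -> stack[2] = 4 (IPI_PRIORITY)
	 * IPI EOI:         pop_cppr() == 5, CPPR drops back to the device irq
	 * device EOI:      pop_cppr() == 0xff, back to LOWEST_PRIORITY */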
374 | static void xics_eoi_direct(struct irq_data *d) | ||
375 | { | ||
376 | unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
377 | |||
378 | iosync(); | ||
379 | direct_xirr_info_set((pop_cppr() << 24) | hwirq); | ||
380 | } | ||
381 | |||
382 | static void xics_eoi_lpar(struct irq_data *d) | ||
383 | { | ||
384 | unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
385 | |||
386 | iosync(); | ||
387 | lpar_xirr_info_set((pop_cppr() << 24) | hwirq); | ||
388 | } | ||
389 | |||
390 | static int | ||
391 | xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) | ||
392 | { | ||
393 | unsigned int hwirq; | ||
394 | int status; | ||
395 | int xics_status[2]; | ||
396 | int irq_server; | ||
397 | |||
398 | hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
399 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
400 | return -1; | ||
401 | |||
402 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); | ||
403 | |||
404 | if (status) { | ||
405 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
406 | __func__, hwirq, status); | ||
407 | return -1; | ||
408 | } | ||
409 | |||
410 | irq_server = get_irq_server(d->irq, cpumask, 1); | ||
411 | if (irq_server == -1) { | ||
412 | char cpulist[128]; | ||
413 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
414 | printk(KERN_WARNING | ||
415 | "%s: No online cpus in the mask %s for irq %d\n", | ||
416 | __func__, cpulist, d->irq); | ||
417 | return -1; | ||
418 | } | ||
419 | |||
420 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
421 | hwirq, irq_server, xics_status[1]); | ||
422 | |||
423 | if (status) { | ||
424 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | ||
425 | __func__, hwirq, status); | ||
426 | return -1; | ||
427 | } | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static struct irq_chip xics_pic_direct = { | ||
433 | .name = "XICS", | ||
434 | .irq_startup = xics_startup, | ||
435 | .irq_mask = xics_mask_irq, | ||
436 | .irq_unmask = xics_unmask_irq, | ||
437 | .irq_eoi = xics_eoi_direct, | ||
438 | .irq_set_affinity = xics_set_affinity | ||
439 | }; | ||
440 | |||
441 | static struct irq_chip xics_pic_lpar = { | ||
442 | .name = "XICS", | ||
443 | .irq_startup = xics_startup, | ||
444 | .irq_mask = xics_mask_irq, | ||
445 | .irq_unmask = xics_unmask_irq, | ||
446 | .irq_eoi = xics_eoi_lpar, | ||
447 | .irq_set_affinity = xics_set_affinity | ||
448 | }; | ||
449 | |||
450 | |||
451 | /* Interface to arch irq controller subsystem layer */ | ||
452 | |||
453 | /* Points to the irq_chip we're actually using */ | ||
454 | static struct irq_chip *xics_irq_chip; | ||
455 | |||
456 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
457 | { | ||
458 | /* IBM machines have interrupt parents of various funky types for things | ||
459 | * like vdevices, events, etc... The trick we use here is to match | ||
460 | * everything except the legacy 8259, which is compatible with "chrp,iic" | ||
461 | */ | ||
462 | return !of_device_is_compatible(node, "chrp,iic"); | ||
463 | } | ||
464 | |||
465 | static int xics_host_map(struct irq_host *h, unsigned int virq, | ||
466 | irq_hw_number_t hw) | ||
467 | { | ||
468 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | ||
469 | |||
470 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | ||
471 | irq_radix_revmap_insert(xics_host, virq, hw); | ||
472 | |||
473 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
474 | irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
479 | const u32 *intspec, unsigned int intsize, | ||
480 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
481 | |||
482 | { | ||
483 | /* Current xics implementation translates everything | ||
484 | * to level. It is not technically right for MSIs but this | ||
485 | * is irrelevant at this point. We might get smarter in the future | ||
486 | */ | ||
487 | *out_hwirq = intspec[0]; | ||
488 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | static struct irq_host_ops xics_host_ops = { | ||
494 | .match = xics_host_match, | ||
495 | .map = xics_host_map, | ||
496 | .xlate = xics_host_xlate, | ||
497 | }; | ||
498 | |||
499 | static void __init xics_init_host(void) | ||
500 | { | ||
501 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
502 | xics_irq_chip = &xics_pic_lpar; | ||
503 | else | ||
504 | xics_irq_chip = &xics_pic_direct; | ||
505 | |||
506 | xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, | ||
507 | XICS_IRQ_SPURIOUS); | ||
508 | BUG_ON(xics_host == NULL); | ||
509 | irq_set_default_host(xics_host); | ||
510 | } | ||
511 | |||
512 | |||
513 | /* Inter-processor interrupt support */ | ||
514 | |||
515 | #ifdef CONFIG_SMP | ||
516 | /* | ||
517 | * XICS only has a single IPI, so encode the messages per CPU | ||
518 | */ | ||
519 | static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
520 | |||
521 | static inline void smp_xics_do_message(int cpu, int msg) | ||
522 | { | ||
523 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
524 | |||
525 | set_bit(msg, tgt); | ||
526 | mb(); | ||
527 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
528 | lpar_qirr_info(cpu, IPI_PRIORITY); | ||
529 | else | ||
530 | direct_qirr_info(cpu, IPI_PRIORITY); | ||
531 | } | ||
532 | |||
533 | void smp_xics_message_pass(int target, int msg) | ||
534 | { | ||
535 | unsigned int i; | ||
536 | |||
537 | if (target < NR_CPUS) { | ||
538 | smp_xics_do_message(target, msg); | ||
539 | } else { | ||
540 | for_each_online_cpu(i) { | ||
541 | if (target == MSG_ALL_BUT_SELF | ||
542 | && i == smp_processor_id()) | ||
543 | continue; | ||
544 | smp_xics_do_message(i, msg); | ||
545 | } | ||
546 | } | ||
547 | } | ||
548 | |||
549 | static irqreturn_t xics_ipi_dispatch(int cpu) | ||
550 | { | ||
551 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
552 | |||
553 | mb(); /* order mmio clearing qirr */ | ||
554 | while (*tgt) { | ||
555 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { | ||
556 | smp_message_recv(PPC_MSG_CALL_FUNCTION); | ||
557 | } | ||
558 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { | ||
559 | smp_message_recv(PPC_MSG_RESCHEDULE); | ||
560 | } | ||
561 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { | ||
562 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | ||
563 | } | ||
564 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | ||
565 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { | ||
566 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | ||
567 | } | ||
568 | #endif | ||
569 | } | ||
570 | return IRQ_HANDLED; | ||
571 | } | ||
572 | |||
573 | static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id) | ||
574 | { | ||
575 | int cpu = smp_processor_id(); | ||
576 | |||
577 | direct_qirr_info(cpu, 0xff); | ||
578 | |||
579 | return xics_ipi_dispatch(cpu); | ||
580 | } | ||
581 | |||
582 | static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id) | ||
583 | { | ||
584 | int cpu = smp_processor_id(); | ||
585 | |||
586 | lpar_qirr_info(cpu, 0xff); | ||
587 | |||
588 | return xics_ipi_dispatch(cpu); | ||
589 | } | ||
590 | |||
591 | static void xics_request_ipi(void) | ||
592 | { | ||
593 | unsigned int ipi; | ||
594 | int rc; | ||
595 | |||
596 | ipi = irq_create_mapping(xics_host, XICS_IPI); | ||
597 | BUG_ON(ipi == NO_IRQ); | ||
598 | |||
599 | /* | ||
600 | * IPIs are marked IRQF_DISABLED as they must run with irqs | ||
601 | * disabled | ||
602 | */ | ||
603 | irq_set_handler(ipi, handle_percpu_irq); | ||
604 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
605 | rc = request_irq(ipi, xics_ipi_action_lpar, | ||
606 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
607 | else | ||
608 | rc = request_irq(ipi, xics_ipi_action_direct, | ||
609 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
610 | BUG_ON(rc); | ||
611 | } | ||
612 | |||
613 | int __init smp_xics_probe(void) | ||
614 | { | ||
615 | xics_request_ipi(); | ||
616 | |||
617 | return cpumask_weight(cpu_possible_mask); | ||
618 | } | ||
619 | |||
620 | #endif /* CONFIG_SMP */ | ||
621 | |||
622 | |||
623 | /* Initialization */ | ||
624 | |||
625 | static void xics_update_irq_servers(void) | ||
626 | { | ||
627 | int i, j; | ||
628 | struct device_node *np; | ||
629 | u32 ilen; | ||
630 | const u32 *ireg; | ||
631 | u32 hcpuid; | ||
632 | |||
633 | /* Find the server numbers for the boot cpu. */ | ||
634 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
635 | BUG_ON(!np); | ||
636 | |||
637 | ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); | ||
638 | if (!ireg) { | ||
639 | of_node_put(np); | ||
640 | return; | ||
641 | } | ||
642 | |||
643 | i = ilen / sizeof(int); | ||
644 | hcpuid = get_hard_smp_processor_id(boot_cpuid); | ||
645 | |||
646 | /* Global interrupt distribution server is specified in the last | ||
647 | * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last | ||
648 | * entry from this property for the current boot cpu id and use it as | ||
649 | * the default distribution server. | ||
650 | */ | ||
651 | for (j = 0; j < i; j += 2) { | ||
652 | if (ireg[j] == hcpuid) { | ||
653 | default_server = hcpuid; | ||
654 | default_distrib_server = ireg[j+1]; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | of_node_put(np); | ||
659 | } | ||
660 | |||
661 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, | ||
662 | unsigned long size) | ||
663 | { | ||
664 | int i; | ||
665 | |||
666 | /* This may look gross but it's good enough for now; we don't quite | ||
667 | * have a hard -> linux processor id mapping. | ||
668 | */ | ||
669 | for_each_possible_cpu(i) { | ||
670 | if (!cpu_present(i)) | ||
671 | continue; | ||
672 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
673 | xics_per_cpu[i] = ioremap(addr, size); | ||
674 | return; | ||
675 | } | ||
676 | } | ||
677 | } | ||
678 | |||
679 | static void __init xics_init_one_node(struct device_node *np, | ||
680 | unsigned int *indx) | ||
681 | { | ||
682 | unsigned int ilen; | ||
683 | const u32 *ireg; | ||
684 | |||
685 | /* This code makes the theoretically broken assumption that the interrupt | ||
686 | * server numbers are the same as the hard CPU numbers. | ||
687 | * This happens to be the case so far but we are playing with fire... | ||
688 | * should be fixed one of these days. -BenH. | ||
689 | */ | ||
690 | ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
691 | |||
692 | /* Does that ever happen? We'll know soon enough... but even good old | ||
693 | * f80 does have that property. | ||
694 | */ | ||
695 | WARN_ON(ireg == NULL); | ||
696 | if (ireg) { | ||
697 | /* | ||
698 | * set node starting index for this node | ||
699 | */ | ||
700 | *indx = *ireg; | ||
701 | } | ||
702 | ireg = of_get_property(np, "reg", &ilen); | ||
703 | if (!ireg) | ||
704 | panic("xics_init_IRQ: can't find interrupt reg property"); | ||
705 | |||
706 | while (ilen >= (4 * sizeof(u32))) { | ||
707 | unsigned long addr, size; | ||
708 | |||
709 | /* XXX Use proper OF parsing code here !!! */ | ||
710 | addr = (unsigned long)*ireg++ << 32; | ||
711 | ilen -= sizeof(u32); | ||
712 | addr |= *ireg++; | ||
713 | ilen -= sizeof(u32); | ||
714 | size = (unsigned long)*ireg++ << 32; | ||
715 | ilen -= sizeof(u32); | ||
716 | size |= *ireg++; | ||
717 | ilen -= sizeof(u32); | ||
718 | xics_map_one_cpu(*indx, addr, size); | ||
719 | (*indx)++; | ||
720 | } | ||
721 | } | ||
722 | |||
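The "XXX Use proper OF parsing" loop above assembles 64-bit address/size pairs from raw u32 cells by hand. The standard helper would do the cell-count and ranges handling instead; a hedged sketch of that alternative:

	struct resource res;
	int i = 0;

	/* of_address_to_resource() honours #address-cells/#size-cells
	 * and translates through ranges, unlike the manual loop */
	while (of_address_to_resource(np, i, &res) == 0) {
		xics_map_one_cpu(*indx, res.start, resource_size(&res));
		(*indx)++;
		i++;
	}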
723 | void __init xics_init_IRQ(void) | ||
724 | { | ||
725 | struct device_node *np; | ||
726 | u32 indx = 0; | ||
727 | int found = 0; | ||
728 | const u32 *isize; | ||
729 | |||
730 | ppc64_boot_msg(0x20, "XICS Init"); | ||
731 | |||
732 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
733 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
734 | ibm_int_on = rtas_token("ibm,int-on"); | ||
735 | ibm_int_off = rtas_token("ibm,int-off"); | ||
736 | |||
737 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
738 | found = 1; | ||
739 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | ||
740 | of_node_put(np); | ||
741 | break; | ||
742 | } | ||
743 | xics_init_one_node(np, &indx); | ||
744 | } | ||
745 | if (found == 0) | ||
746 | return; | ||
747 | |||
748 | /* get the bit size of server numbers */ | ||
749 | found = 0; | ||
750 | |||
751 | for_each_compatible_node(np, NULL, "ibm,ppc-xics") { | ||
752 | isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); | ||
753 | |||
754 | if (!isize) | ||
755 | continue; | ||
756 | |||
757 | if (!found) { | ||
758 | interrupt_server_size = *isize; | ||
759 | found = 1; | ||
760 | } else if (*isize != interrupt_server_size) { | ||
761 | printk(KERN_WARNING "XICS: " | ||
762 | "mismatched ibm,interrupt-server#-size\n"); | ||
763 | interrupt_server_size = max(*isize, | ||
764 | interrupt_server_size); | ||
765 | } | ||
766 | } | ||
767 | |||
768 | xics_update_irq_servers(); | ||
769 | xics_init_host(); | ||
770 | |||
771 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
772 | ppc_md.get_irq = xics_get_irq_lpar; | ||
773 | else | ||
774 | ppc_md.get_irq = xics_get_irq_direct; | ||
775 | |||
776 | xics_setup_cpu(); | ||
777 | |||
778 | ppc64_boot_msg(0x21, "XICS Done"); | ||
779 | } | ||
780 | |||
781 | /* Cpu startup, shutdown, and hotplug */ | ||
782 | |||
783 | static void xics_set_cpu_priority(unsigned char cppr) | ||
784 | { | ||
785 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
786 | |||
787 | /* | ||
788 | * we only really want to set the priority when there's | ||
789 | * just one cppr value on the stack | ||
790 | */ | ||
791 | WARN_ON(os_cppr->index != 0); | ||
792 | |||
793 | os_cppr->stack[0] = cppr; | ||
794 | |||
795 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
796 | lpar_cppr_info(cppr); | ||
797 | else | ||
798 | direct_cppr_info(cppr); | ||
799 | iosync(); | ||
800 | } | ||
801 | |||
802 | /* Have the calling processor join or leave the specified global queue */ | ||
803 | static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) | ||
804 | { | ||
805 | int index; | ||
806 | int status; | ||
807 | |||
808 | if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) | ||
809 | return; | ||
810 | |||
811 | index = (1UL << interrupt_server_size) - 1 - gserver; | ||
812 | |||
813 | status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); | ||
814 | |||
815 | WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", | ||
816 | GLOBAL_INTERRUPT_QUEUE, index, join, status); | ||
817 | } | ||
818 | |||
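The indicator index is the one's complement of the server number within the interrupt_server_size-bit field. Worked through with the default 8-bit size:

	/* index = (1UL << 8) - 1 - gserver = 255 - gserver
	 * gserver 0 -> index 255
	 * gserver 1 -> index 254
	 * joining passes join = 1, leaving passes join = 0 */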
819 | void xics_setup_cpu(void) | ||
820 | { | ||
821 | xics_set_cpu_priority(LOWEST_PRIORITY); | ||
822 | |||
823 | xics_set_cpu_giq(default_distrib_server, 1); | ||
824 | } | ||
825 | |||
826 | void xics_teardown_cpu(void) | ||
827 | { | ||
828 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
829 | int cpu = smp_processor_id(); | ||
830 | |||
831 | /* | ||
832 | * we have to reset the cppr index to 0 because we're | ||
833 | * not going to return from the IPI | ||
834 | */ | ||
835 | os_cppr->index = 0; | ||
836 | xics_set_cpu_priority(0); | ||
837 | |||
838 | /* Clear any pending IPI request */ | ||
839 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
840 | lpar_qirr_info(cpu, 0xff); | ||
841 | else | ||
842 | direct_qirr_info(cpu, 0xff); | ||
843 | } | ||
844 | |||
845 | void xics_kexec_teardown_cpu(int secondary) | ||
846 | { | ||
847 | xics_teardown_cpu(); | ||
848 | |||
849 | /* | ||
850 | * we take the IPI irq and never return, so we | ||
851 | * need to EOI the IPI but want to leave our priority at 0 | ||
852 | * | ||
853 | * should we check all the other interrupts too? | ||
854 | * should we be flagging the idle loop instead? | ||
855 | * or creating some task to be scheduled? | ||
856 | */ | ||
857 | |||
858 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
859 | lpar_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
860 | else | ||
861 | direct_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
862 | |||
863 | /* | ||
864 | * Some machines need to have at least one cpu in the GIQ, | ||
865 | * so leave the master cpu in the group. | ||
866 | */ | ||
867 | if (secondary) | ||
868 | xics_set_cpu_giq(default_distrib_server, 0); | ||
869 | } | ||
870 | |||
871 | #ifdef CONFIG_HOTPLUG_CPU | ||
872 | |||
873 | /* Interrupts are disabled. */ | ||
874 | void xics_migrate_irqs_away(void) | ||
875 | { | ||
876 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | ||
877 | int virq; | ||
878 | |||
879 | /* If we used to be the default server, move to the new "boot_cpuid" */ | ||
880 | if (hw_cpu == default_server) | ||
881 | xics_update_irq_servers(); | ||
882 | |||
883 | /* Reject any interrupt that was queued to us... */ | ||
884 | xics_set_cpu_priority(0); | ||
885 | |||
886 | /* Remove ourselves from the global interrupt queue */ | ||
887 | xics_set_cpu_giq(default_distrib_server, 0); | ||
888 | |||
889 | /* Allow IPIs again... */ | ||
890 | xics_set_cpu_priority(DEFAULT_PRIORITY); | ||
891 | |||
892 | for_each_irq(virq) { | ||
893 | struct irq_desc *desc; | ||
894 | struct irq_chip *chip; | ||
895 | unsigned int hwirq; | ||
896 | int xics_status[2]; | ||
897 | int status; | ||
898 | unsigned long flags; | ||
899 | |||
900 | /* We can't set affinity on ISA interrupts */ | ||
901 | if (virq < NUM_ISA_INTERRUPTS) | ||
902 | continue; | ||
903 | if (irq_map[virq].host != xics_host) | ||
904 | continue; | ||
905 | hwirq = (unsigned int)irq_map[virq].hwirq; | ||
906 | /* We still need to get IPIs. */ | ||
907 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
908 | continue; | ||
909 | |||
910 | desc = irq_to_desc(virq); | ||
911 | |||
912 | /* We only need to migrate enabled IRQs */ | ||
913 | if (desc == NULL || desc->action == NULL) | ||
914 | continue; | ||
915 | |||
916 | chip = irq_desc_get_chip(desc); | ||
917 | if (chip == NULL || chip->irq_set_affinity == NULL) | ||
918 | continue; | ||
919 | |||
920 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
921 | |||
922 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); | ||
923 | if (status) { | ||
924 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
925 | __func__, hwirq, status); | ||
926 | goto unlock; | ||
927 | } | ||
928 | |||
929 | /* | ||
930 | * We only support delivery to all cpus or to one cpu. | ||
931 | * The irq has to be migrated only in the single cpu | ||
932 | * case. | ||
933 | */ | ||
934 | if (xics_status[0] != hw_cpu) | ||
935 | goto unlock; | ||
936 | |||
937 | /* This is expected during cpu offline. */ | ||
938 | if (cpu_online(cpu)) | ||
939 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | ||
940 | virq, cpu); | ||
941 | |||
942 | /* Reset affinity to all cpus */ | ||
943 | cpumask_setall(desc->irq_data.affinity); | ||
944 | chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true); | ||
945 | unlock: | ||
946 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
947 | } | ||
948 | } | ||
949 | #endif | ||
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h deleted file mode 100644 index d1d5a83039ae..000000000000 --- a/arch/powerpc/platforms/pseries/xics.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.h | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _POWERPC_KERNEL_XICS_H | ||
13 | #define _POWERPC_KERNEL_XICS_H | ||
14 | |||
15 | extern void xics_init_IRQ(void); | ||
16 | extern void xics_setup_cpu(void); | ||
17 | extern void xics_teardown_cpu(void); | ||
18 | extern void xics_kexec_teardown_cpu(int secondary); | ||
19 | extern void xics_migrate_irqs_away(void); | ||
20 | extern int smp_xics_probe(void); | ||
21 | extern void smp_xics_message_pass(int target, int msg); | ||
22 | |||
23 | #endif /* _POWERPC_KERNEL_XICS_H */ | ||