author     Linus Torvalds <torvalds@linux-foundation.org>    2008-07-16 20:25:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-07-16 20:25:46 -0400
commit     dc7c65db2845a8d17432d89252c4227a9a7cb15f (patch)
tree       79030b0aaaafc04bc4303c21495134e744afc058
parent     8a0ca91e1db5de5eb5b18cfa919d52ff8be375af (diff)
parent     58b6e5538460be358fdf1286d9a2fbcfcc2cfaba (diff)
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (72 commits)
  Revert "x86/PCI: ACPI based PCI gap calculation"
  PCI: remove unnecessary volatile in PCIe hotplug struct controller
  x86/PCI: ACPI based PCI gap calculation
  PCI: include linux/pm_wakeup.h for device_set_wakeup_capable
  PCI PM: Fix pci_prepare_to_sleep
  x86/PCI: Fix PCI config space for domains > 0
  Fix acpi_pm_device_sleep_wake() by providing a stub for CONFIG_PM_SLEEP=n
  PCI: Simplify PCI device PM code
  PCI PM: Introduce pci_prepare_to_sleep and pci_back_from_sleep
  PCI ACPI: Rework PCI handling of wake-up
  ACPI: Introduce new device wakeup flag 'prepared'
  ACPI: Introduce acpi_device_sleep_wake function
  PCI: rework pci_set_power_state function to call platform first
  PCI: Introduce platform_pci_power_manageable function
  ACPI: Introduce acpi_bus_power_manageable function
  PCI: make pci_name use dev_name
  PCI: handle pci_name() being const
  PCI: add stub for pci_set_consistent_dma_mask()
  PCI: remove unused arch pcibios_update_resource() functions
  PCI: fix pci_setup_device()'s sprinting into a const buffer
  ...

Fixed up conflicts in various files (arch/x86/kernel/setup_64.c,
arch/x86/pci/irq.c, arch/x86/pci/pci.h, drivers/acpi/sleep/main.c,
drivers/pci/pci.c, drivers/pci/pci.h, include/acpi/acpi_bus.h) from
x86 and ACPI updates manually.
-rw-r--r--  Documentation/kernel-parameters.txt | 9
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  arch/frv/mb93090-mb00/pci-frv.c | 30
-rw-r--r--  arch/m68knommu/kernel/comempci.c | 9
-rw-r--r--  arch/mips/pmc-sierra/yosemite/ht.c | 36
-rw-r--r--  arch/sh/drivers/pci/pci.c | 32
-rw-r--r--  arch/sparc64/kernel/pci.c | 2
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 2
-rw-r--r--  arch/x86/kernel/apm_32.c | 8
-rw-r--r--  arch/x86/kernel/early-quirks.c | 26
-rw-r--r--  arch/x86/kernel/setup.c | 5
-rw-r--r--  arch/x86/pci/common.c | 30
-rw-r--r--  arch/x86/pci/early.c | 60
-rw-r--r--  arch/x86/pci/irq.c | 120
-rw-r--r--  arch/x86/pci/pci.h | 1
-rw-r--r--  drivers/acpi/Kconfig | 9
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/bus.c | 22
-rw-r--r--  drivers/acpi/glue.c | 2
-rw-r--r--  drivers/acpi/pci_slot.c | 368
-rw-r--r--  drivers/acpi/power.c | 138
-rw-r--r--  drivers/acpi/scan.c | 42
-rw-r--r--  drivers/acpi/sleep/main.c | 301
-rw-r--r--  drivers/acpi/sleep/wakeup.c | 13
-rw-r--r--  drivers/base/platform.c | 296
-rw-r--r--  drivers/base/power/main.c | 675
-rw-r--r--  drivers/base/power/power.h | 2
-rw-r--r--  drivers/base/power/sysfs.c | 3
-rw-r--r--  drivers/base/power/trace.c | 4
-rw-r--r--  drivers/pci/Makefile | 2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 85
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 25
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 23
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 6
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 4
-rw-r--r--  drivers/pci/hotplug/fakephp.c | 86
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 3
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 284
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 16
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 127
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 318
-rw-r--r--  drivers/pci/hotplug/rpadlpar_sysfs.c | 5
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c | 44
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 12
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 14
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 37
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 1
-rw-r--r--  drivers/pci/intel-iommu.c | 1
-rw-r--r--  drivers/pci/msi.c | 22
-rw-r--r--  drivers/pci/pci-acpi.c | 271
-rw-r--r--  drivers/pci/pci-driver.c | 388
-rw-r--r--  drivers/pci/pci.c | 479
-rw-r--r--  drivers/pci/pci.h | 47
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 9
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 8
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 24
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 1
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 22
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 5
-rw-r--r--  drivers/pci/probe.c | 38
-rw-r--r--  drivers/pci/proc.c | 4
-rw-r--r--  drivers/pci/quirks.c | 134
-rw-r--r--  drivers/pci/setup-bus.c | 43
-rw-r--r--  drivers/pci/setup-irq.c | 3
-rw-r--r--  drivers/pci/setup-res.c | 70
-rw-r--r--  drivers/pci/slot.c | 233
-rw-r--r--  include/acpi/acpi_bus.h | 8
-rw-r--r--  include/acpi/acpi_drivers.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 6
-rw-r--r--  include/asm-x86/pci-direct.h | 4
-rw-r--r--  include/linux/acpi.h | 3
-rw-r--r--  include/linux/device.h | 9
-rw-r--r--  include/linux/pci.h | 57
-rw-r--r--  include/linux/pci_hotplug.h | 14
-rw-r--r--  include/linux/pci_regs.h | 1
-rw-r--r--  include/linux/platform_device.h | 1
-rw-r--r--  include/linux/pm.h | 314
-rw-r--r--  include/linux/pm_wakeup.h | 28
-rw-r--r--  include/linux/suspend.h | 14
-rw-r--r--  kernel/power/disk.c | 50
-rw-r--r--  kernel/power/main.c | 16
-rw-r--r--  lib/kobject.c | 1
84 files changed, 3966 insertions(+), 1719 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5e497d16fb51..09ad7450647b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -147,10 +147,14 @@ and is between 256 and 4096 characters. It is defined in the file
147 default: 0 147 default: 0
148 148
149 acpi_sleep= [HW,ACPI] Sleep options 149 acpi_sleep= [HW,ACPI] Sleep options
150 Format: { s3_bios, s3_mode, s3_beep } 150 Format: { s3_bios, s3_mode, s3_beep, old_ordering }
151 See Documentation/power/video.txt for s3_bios and s3_mode. 151 See Documentation/power/video.txt for s3_bios and s3_mode.
152 s3_beep is for debugging; it makes the PC's speaker beep 152 s3_beep is for debugging; it makes the PC's speaker beep
153 as soon as the kernel's real-mode entry point is called. 153 as soon as the kernel's real-mode entry point is called.
154 old_ordering causes the ACPI 1.0 ordering of the _PTS
155 control method, wrt putting devices into low power
156 states, to be enforced (the ACPI 2.0 ordering of _PTS is
157 used by default).
154 158
155 acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode 159 acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode
156 Format: { level | edge | high | low } 160 Format: { level | edge | high | low }
@@ -1537,6 +1541,9 @@ and is between 256 and 4096 characters. It is defined in the file
1537 Use with caution as certain devices share 1541 Use with caution as certain devices share
1538 address decoders between ROMs and other 1542 address decoders between ROMs and other
1539 resources. 1543 resources.
1544 norom [X86-32,X86_64] Do not assign address space to
1545 expansion ROMs that do not already have
1546 BIOS assigned address ranges.
1540 irqmask=0xMMMM [X86-32] Set a bit mask of IRQs allowed to be 1547 irqmask=0xMMMM [X86-32] Set a bit mask of IRQs allowed to be
1541 assigned automatically to PCI devices. You can 1548 assigned automatically to PCI devices. You can
1542 make the kernel exclude IRQs of your ISA cards 1549 make the kernel exclude IRQs of your ISA cards
diff --git a/MAINTAINERS b/MAINTAINERS
index 2b9212f90446..93fd6b2efeee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -248,7 +248,7 @@ S: Supported
248ACPI PCI HOTPLUG DRIVER 248ACPI PCI HOTPLUG DRIVER
249P: Kristen Carlson Accardi 249P: Kristen Carlson Accardi
250M: kristen.c.accardi@intel.com 250M: kristen.c.accardi@intel.com
251L: pcihpd-discuss@lists.sourceforge.net 251L: linux-pci@vger.kernel.org
252S: Supported 252S: Supported
253 253
254ACPI THERMAL DRIVER 254ACPI THERMAL DRIVER
@@ -1145,21 +1145,21 @@ COMPACTPCI HOTPLUG CORE
1145P: Scott Murray 1145P: Scott Murray
1146M: scottm@somanetworks.com 1146M: scottm@somanetworks.com
1147M: scott@spiteful.org 1147M: scott@spiteful.org
1148L: pcihpd-discuss@lists.sourceforge.net 1148L: linux-pci@vger.kernel.org
1149S: Supported 1149S: Supported
1150 1150
1151COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER 1151COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER
1152P: Scott Murray 1152P: Scott Murray
1153M: scottm@somanetworks.com 1153M: scottm@somanetworks.com
1154M: scott@spiteful.org 1154M: scott@spiteful.org
1155L: pcihpd-discuss@lists.sourceforge.net 1155L: linux-pci@vger.kernel.org
1156S: Supported 1156S: Supported
1157 1157
1158COMPACTPCI HOTPLUG GENERIC DRIVER 1158COMPACTPCI HOTPLUG GENERIC DRIVER
1159P: Scott Murray 1159P: Scott Murray
1160M: scottm@somanetworks.com 1160M: scottm@somanetworks.com
1161M: scott@spiteful.org 1161M: scott@spiteful.org
1162L: pcihpd-discuss@lists.sourceforge.net 1162L: linux-pci@vger.kernel.org
1163S: Supported 1163S: Supported
1164 1164
1165COMPAL LAPTOP SUPPORT 1165COMPAL LAPTOP SUPPORT
@@ -3219,7 +3219,7 @@ S: Supported
3219PCIE HOTPLUG DRIVER 3219PCIE HOTPLUG DRIVER
3220P: Kristen Carlson Accardi 3220P: Kristen Carlson Accardi
3221M: kristen.c.accardi@intel.com 3221M: kristen.c.accardi@intel.com
3222L: pcihpd-discuss@lists.sourceforge.net 3222L: linux-pci@vger.kernel.org
3223S: Supported 3223S: Supported
3224 3224
3225PCMCIA SUBSYSTEM 3225PCMCIA SUBSYSTEM
@@ -3865,7 +3865,7 @@ S: Maintained
3865SHPC HOTPLUG DRIVER 3865SHPC HOTPLUG DRIVER
3866P: Kristen Carlson Accardi 3866P: Kristen Carlson Accardi
3867M: kristen.c.accardi@intel.com 3867M: kristen.c.accardi@intel.com
3868L: pcihpd-discuss@lists.sourceforge.net 3868L: linux-pci@vger.kernel.org
3869S: Supported 3869S: Supported
3870 3870
3871SECURE DIGITAL HOST CONTROLLER INTERFACE DRIVER 3871SECURE DIGITAL HOST CONTROLLER INTERFACE DRIVER
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 4f165c93be42..edae117fcc2b 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -19,36 +19,6 @@
19 19
20#include "pci-frv.h" 20#include "pci-frv.h"
21 21
22#if 0
23void
24pcibios_update_resource(struct pci_dev *dev, struct resource *root,
25 struct resource *res, int resource)
26{
27 u32 new, check;
28 int reg;
29
30 new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
31 if (resource < 6) {
32 reg = PCI_BASE_ADDRESS_0 + 4*resource;
33 } else if (resource == PCI_ROM_RESOURCE) {
34 res->flags |= IORESOURCE_ROM_ENABLE;
35 new |= PCI_ROM_ADDRESS_ENABLE;
36 reg = dev->rom_base_reg;
37 } else {
38 /* Somebody might have asked allocation of a non-standard resource */
39 return;
40 }
41
42 pci_write_config_dword(dev, reg, new);
43 pci_read_config_dword(dev, reg, &check);
44 if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
45 printk(KERN_ERR "PCI: Error while updating region "
46 "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
47 new, check);
48 }
49}
50#endif
51
52/* 22/*
53 * We need to avoid collisions with `mirrored' VGA ports 23 * We need to avoid collisions with `mirrored' VGA ports
54 * and other strange ISA hardware, so we always want the 24 * and other strange ISA hardware, so we always want the
diff --git a/arch/m68knommu/kernel/comempci.c b/arch/m68knommu/kernel/comempci.c
index 6ee00effbad2..0a68b5a85f86 100644
--- a/arch/m68knommu/kernel/comempci.c
+++ b/arch/m68knommu/kernel/comempci.c
@@ -375,15 +375,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
375 375
376/*****************************************************************************/ 376/*****************************************************************************/
377 377
378void pcibios_update_resource(struct pci_dev *dev, struct resource *root, struct resource *r, int resource)
379{
380 printk(KERN_WARNING "%s(%d): no support for changing PCI resources...\n",
381 __FILE__, __LINE__);
382}
383
384
385/*****************************************************************************/
386
387/* 378/*
388 * Local routines to interrcept the standard I/O and vector handling 379 * Local routines to interrcept the standard I/O and vector handling
389 * code. Don't include this 'till now - initialization code above needs 380 * code. Don't include this 'till now - initialization code above needs
diff --git a/arch/mips/pmc-sierra/yosemite/ht.c b/arch/mips/pmc-sierra/yosemite/ht.c
index 6380662bbf3c..678388fd34b1 100644
--- a/arch/mips/pmc-sierra/yosemite/ht.c
+++ b/arch/mips/pmc-sierra/yosemite/ht.c
@@ -345,42 +345,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
345 return pcibios_enable_resources(dev); 345 return pcibios_enable_resources(dev);
346} 346}
347 347
348
349
350void pcibios_update_resource(struct pci_dev *dev, struct resource *root,
351 struct resource *res, int resource)
352{
353 u32 new, check;
354 int reg;
355
356 return;
357
358 new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
359 if (resource < 6) {
360 reg = PCI_BASE_ADDRESS_0 + 4 * resource;
361 } else if (resource == PCI_ROM_RESOURCE) {
362 res->flags |= IORESOURCE_ROM_ENABLE;
363 reg = dev->rom_base_reg;
364 } else {
365 /*
366 * Somebody might have asked allocation of a non-standard
367 * resource
368 */
369 return;
370 }
371
372 pci_write_config_dword(dev, reg, new);
373 pci_read_config_dword(dev, reg, &check);
374 if ((new ^ check) &
375 ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK :
376 PCI_BASE_ADDRESS_MEM_MASK)) {
377 printk(KERN_ERR "PCI: Error while updating region "
378 "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
379 new, check);
380 }
381}
382
383
384void pcibios_align_resource(void *data, struct resource *res, 348void pcibios_align_resource(void *data, struct resource *res,
385 resource_size_t size, resource_size_t align) 349 resource_size_t size, resource_size_t align)
386{ 350{
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 08d2e7325252..f57095a2617c 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -76,38 +76,6 @@ void __devinit __weak pcibios_fixup_bus(struct pci_bus *bus)
76 pci_read_bridge_bases(bus); 76 pci_read_bridge_bases(bus);
77} 77}
78 78
79void
80pcibios_update_resource(struct pci_dev *dev, struct resource *root,
81 struct resource *res, int resource)
82{
83 u32 new, check;
84 int reg;
85
86 new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
87 if (resource < 6) {
88 reg = PCI_BASE_ADDRESS_0 + 4*resource;
89 } else if (resource == PCI_ROM_RESOURCE) {
90 res->flags |= IORESOURCE_ROM_ENABLE;
91 new |= PCI_ROM_ADDRESS_ENABLE;
92 reg = dev->rom_base_reg;
93 } else {
94 /*
95 * Somebody might have asked allocation of a non-standard
96 * resource
97 */
98 return;
99 }
100
101 pci_write_config_dword(dev, reg, new);
102 pci_read_config_dword(dev, reg, &check);
103 if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ?
104 PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
105 printk(KERN_ERR "PCI: Error while updating region "
106 "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
107 new, check);
108 }
109}
110
111void pcibios_align_resource(void *data, struct resource *res, 79void pcibios_align_resource(void *data, struct resource *res,
112 resource_size_t size, resource_size_t align) 80 resource_size_t size, resource_size_t align)
113 __attribute__ ((weak)); 81 __attribute__ ((weak));
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 112b09f16f36..d00a3656c287 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -408,7 +408,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
408 dev->class = class >> 8; 408 dev->class = class >> 8;
409 dev->revision = class & 0xff; 409 dev->revision = class & 0xff;
410 410
411 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 411 sprintf(dev->dev.bus_id, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
412 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 412 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
413 413
414 if (ofpci_verbose) 414 if (ofpci_verbose)
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 793ad2045f58..868de3d5c39d 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -158,6 +158,8 @@ static int __init acpi_sleep_setup(char *str)
158 acpi_realmode_flags |= 2; 158 acpi_realmode_flags |= 2;
159 if (strncmp(str, "s3_beep", 7) == 0) 159 if (strncmp(str, "s3_beep", 7) == 0)
160 acpi_realmode_flags |= 4; 160 acpi_realmode_flags |= 4;
161 if (strncmp(str, "old_ordering", 12) == 0)
162 acpi_old_suspend_ordering();
161 str = strchr(str, ','); 163 str = strchr(str, ',');
162 if (str != NULL) 164 if (str != NULL)
163 str += strspn(str, ", \t"); 165 str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 75cb5da4ea0a..bf9b441331e9 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1213,9 +1213,9 @@ static int suspend(int vetoable)
1213 if (err != APM_SUCCESS) 1213 if (err != APM_SUCCESS)
1214 apm_error("suspend", err); 1214 apm_error("suspend", err);
1215 err = (err == APM_SUCCESS) ? 0 : -EIO; 1215 err = (err == APM_SUCCESS) ? 0 : -EIO;
1216 device_power_up(); 1216 device_power_up(PMSG_RESUME);
1217 local_irq_enable(); 1217 local_irq_enable();
1218 device_resume(); 1218 device_resume(PMSG_RESUME);
1219 queue_event(APM_NORMAL_RESUME, NULL); 1219 queue_event(APM_NORMAL_RESUME, NULL);
1220 spin_lock(&user_list_lock); 1220 spin_lock(&user_list_lock);
1221 for (as = user_list; as != NULL; as = as->next) { 1221 for (as = user_list; as != NULL; as = as->next) {
@@ -1240,7 +1240,7 @@ static void standby(void)
1240 apm_error("standby", err); 1240 apm_error("standby", err);
1241 1241
1242 local_irq_disable(); 1242 local_irq_disable();
1243 device_power_up(); 1243 device_power_up(PMSG_RESUME);
1244 local_irq_enable(); 1244 local_irq_enable();
1245} 1245}
1246 1246
@@ -1326,7 +1326,7 @@ static void check_events(void)
1326 ignore_bounce = 1; 1326 ignore_bounce = 1;
1327 if ((event != APM_NORMAL_RESUME) 1327 if ((event != APM_NORMAL_RESUME)
1328 || (ignore_normal_resume == 0)) { 1328 || (ignore_normal_resume == 0)) {
1329 device_resume(); 1329 device_resume(PMSG_RESUME);
1330 queue_event(event, NULL); 1330 queue_event(event, NULL);
1331 } 1331 }
1332 ignore_normal_resume = 0; 1332 ignore_normal_resume = 0;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index a4665f37cfc5..a0e11c0cc872 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -120,7 +120,18 @@ static struct chipset early_qrk[] __initdata = {
120 {} 120 {}
121}; 121};
122 122
123static void __init check_dev_quirk(int num, int slot, int func) 123/**
124 * check_dev_quirk - apply early quirks to a given PCI device
125 * @num: bus number
126 * @slot: slot number
127 * @func: PCI function
128 *
129 * Check the vendor & device ID against the early quirks table.
130 *
131 * If the device is single function, let early_quirks() know so we don't
132 * poke at this device again.
133 */
134static int __init check_dev_quirk(int num, int slot, int func)
124{ 135{
125 u16 class; 136 u16 class;
126 u16 vendor; 137 u16 vendor;
@@ -131,7 +142,7 @@ static void __init check_dev_quirk(int num, int slot, int func)
131 class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE); 142 class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
132 143
133 if (class == 0xffff) 144 if (class == 0xffff)
134 return; 145 return -1; /* no class, treat as single function */
135 146
136 vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID); 147 vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);
137 148
@@ -154,7 +165,9 @@ static void __init check_dev_quirk(int num, int slot, int func)
154 type = read_pci_config_byte(num, slot, func, 165 type = read_pci_config_byte(num, slot, func,
155 PCI_HEADER_TYPE); 166 PCI_HEADER_TYPE);
156 if (!(type & 0x80)) 167 if (!(type & 0x80))
157 return; 168 return -1;
169
170 return 0;
158} 171}
159 172
160void __init early_quirks(void) 173void __init early_quirks(void)
@@ -167,6 +180,9 @@ void __init early_quirks(void)
167 /* Poor man's PCI discovery */ 180 /* Poor man's PCI discovery */
168 for (num = 0; num < 32; num++) 181 for (num = 0; num < 32; num++)
169 for (slot = 0; slot < 32; slot++) 182 for (slot = 0; slot < 32; slot++)
170 for (func = 0; func < 8; func++) 183 for (func = 0; func < 8; func++) {
171 check_dev_quirk(num, slot, func); 184 /* Only probe function 0 on single fn devices */
185 if (check_dev_quirk(num, slot, func))
186 break;
187 }
172} 188}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 36c540d4ac4b..531b55b8e81a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -684,6 +684,11 @@ void __init setup_arch(char **cmdline_p)
684 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); 684 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
685 } 685 }
686 686
687#ifdef CONFIG_PCI
688 if (pci_early_dump_regs)
689 early_dump_pci_devices();
690#endif
691
687 finish_e820_parsing(); 692 finish_e820_parsing();
688 693
689#ifdef CONFIG_X86_32 694#ifdef CONFIG_X86_32
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 20b9f59f95df..b67732bbb85a 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -20,6 +20,7 @@
20unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | 20unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
21 PCI_PROBE_MMCONF; 21 PCI_PROBE_MMCONF;
22 22
23unsigned int pci_early_dump_regs;
23static int pci_bf_sort; 24static int pci_bf_sort;
24int pci_routeirq; 25int pci_routeirq;
25int pcibios_last_bus = -1; 26int pcibios_last_bus = -1;
@@ -31,7 +32,7 @@ struct pci_raw_ops *raw_pci_ext_ops;
31int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, 32int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
32 int reg, int len, u32 *val) 33 int reg, int len, u32 *val)
33{ 34{
34 if (reg < 256 && raw_pci_ops) 35 if (domain == 0 && reg < 256 && raw_pci_ops)
35 return raw_pci_ops->read(domain, bus, devfn, reg, len, val); 36 return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
36 if (raw_pci_ext_ops) 37 if (raw_pci_ext_ops)
37 return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val); 38 return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
@@ -41,7 +42,7 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
41int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, 42int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
42 int reg, int len, u32 val) 43 int reg, int len, u32 val)
43{ 44{
44 if (reg < 256 && raw_pci_ops) 45 if (domain == 0 && reg < 256 && raw_pci_ops)
45 return raw_pci_ops->write(domain, bus, devfn, reg, len, val); 46 return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
46 if (raw_pci_ext_ops) 47 if (raw_pci_ext_ops)
47 return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val); 48 return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
@@ -121,6 +122,21 @@ void __init dmi_check_skip_isa_align(void)
121 dmi_check_system(can_skip_pciprobe_dmi_table); 122 dmi_check_system(can_skip_pciprobe_dmi_table);
122} 123}
123 124
125static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
126{
127 struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
128
129 if (pci_probe & PCI_NOASSIGN_ROMS) {
130 if (rom_r->parent)
131 return;
132 if (rom_r->start) {
133 /* we deal with BIOS assigned ROM later */
134 return;
135 }
136 rom_r->start = rom_r->end = rom_r->flags = 0;
137 }
138}
139
124/* 140/*
125 * Called after each bus is probed, but before its children 141 * Called after each bus is probed, but before its children
126 * are examined. 142 * are examined.
@@ -128,7 +144,11 @@ void __init dmi_check_skip_isa_align(void)
128 144
129void __devinit pcibios_fixup_bus(struct pci_bus *b) 145void __devinit pcibios_fixup_bus(struct pci_bus *b)
130{ 146{
147 struct pci_dev *dev;
148
131 pci_read_bridge_bases(b); 149 pci_read_bridge_bases(b);
150 list_for_each_entry(dev, &b->devices, bus_list)
151 pcibios_fixup_device_resources(dev);
132} 152}
133 153
134/* 154/*
@@ -481,12 +501,18 @@ char * __devinit pcibios_setup(char *str)
481 else if (!strcmp(str, "rom")) { 501 else if (!strcmp(str, "rom")) {
482 pci_probe |= PCI_ASSIGN_ROMS; 502 pci_probe |= PCI_ASSIGN_ROMS;
483 return NULL; 503 return NULL;
504 } else if (!strcmp(str, "norom")) {
505 pci_probe |= PCI_NOASSIGN_ROMS;
506 return NULL;
484 } else if (!strcmp(str, "assign-busses")) { 507 } else if (!strcmp(str, "assign-busses")) {
485 pci_probe |= PCI_ASSIGN_ALL_BUSSES; 508 pci_probe |= PCI_ASSIGN_ALL_BUSSES;
486 return NULL; 509 return NULL;
487 } else if (!strcmp(str, "use_crs")) { 510 } else if (!strcmp(str, "use_crs")) {
488 pci_probe |= PCI_USE__CRS; 511 pci_probe |= PCI_USE__CRS;
489 return NULL; 512 return NULL;
513 } else if (!strcmp(str, "earlydump")) {
514 pci_early_dump_regs = 1;
515 return NULL;
490 } else if (!strcmp(str, "routeirq")) { 516 } else if (!strcmp(str, "routeirq")) {
491 pci_routeirq = 1; 517 pci_routeirq = 1;
492 return NULL; 518 return NULL;
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index 42df4b6606df..858dbe3399f9 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -49,7 +49,14 @@ void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
49{ 49{
50 PDprintk("%x writing to %x: %x\n", slot, offset, val); 50 PDprintk("%x writing to %x: %x\n", slot, offset, val);
51 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 51 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
52 outb(val, 0xcfc); 52 outb(val, 0xcfc + (offset&3));
53}
54
55void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
56{
57 PDprintk("%x writing to %x: %x\n", slot, offset, val);
58 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
59 outw(val, 0xcfc + (offset&2));
53} 60}
54 61
55int early_pci_allowed(void) 62int early_pci_allowed(void)
@@ -57,3 +64,54 @@ int early_pci_allowed(void)
57 return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) == 64 return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) ==
58 PCI_PROBE_CONF1; 65 PCI_PROBE_CONF1;
59} 66}
67
68void early_dump_pci_device(u8 bus, u8 slot, u8 func)
69{
70 int i;
71 int j;
72 u32 val;
73
74 printk("PCI: %02x:%02x:%02x", bus, slot, func);
75
76 for (i = 0; i < 256; i += 4) {
77 if (!(i & 0x0f))
78 printk("\n%04x:",i);
79
80 val = read_pci_config(bus, slot, func, i);
81 for (j = 0; j < 4; j++) {
82 printk(" %02x", val & 0xff);
83 val >>= 8;
84 }
85 }
86 printk("\n");
87}
88
89void early_dump_pci_devices(void)
90{
91 unsigned bus, slot, func;
92
93 if (!early_pci_allowed())
94 return;
95
96 for (bus = 0; bus < 256; bus++) {
97 for (slot = 0; slot < 32; slot++) {
98 for (func = 0; func < 8; func++) {
99 u32 class;
100 u8 type;
101 class = read_pci_config(bus, slot, func,
102 PCI_CLASS_REVISION);
103 if (class == 0xffffffff)
104 break;
105
106 early_dump_pci_device(bus, slot, func);
107
108 /* No multi-function device? */
109 type = read_pci_config_byte(bus, slot, func,
110 PCI_HEADER_TYPE);
111 if (!(type & 0x80))
112 break;
113 }
114 }
115 }
116}
117
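An illustrative aside (not part of this patch): the early config accessors above use the legacy 0xCF8/0xCFC mechanism, and the new "+ (offset & 3)" / "+ (offset & 2)" terms exist because 0xCF8 only selects a 32-bit aligned register while sub-dword accesses must hit the matching byte lane of the 0xCFC data window. A minimal sketch of how that address dword is composed, assuming a hypothetical helper name:

#include <linux/types.h>

/*
 * Hypothetical helper (not in this commit): build the 0xCF8 address
 * dword used by the early accessors.  Bits 0-1 are ignored by the
 * hardware, which is why byte/word writes steer to 0xCFC + (offset & 3)
 * or 0xCFC + (offset & 2) instead.
 */
static inline u32 example_cf8_address(u8 bus, u8 slot, u8 func, u8 offset)
{
	return 0x80000000u |		/* enable bit */
	       (bus << 16) |		/* bits 16-23: bus */
	       (slot << 11) |		/* bits 11-15: device */
	       (func << 8) |		/* bits 8-10: function */
	       (offset & 0xfc);		/* bits 2-7: dword-aligned register */
}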
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index dc568c6b83f8..6a06a2eb0597 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -45,7 +45,8 @@ struct irq_router {
45 char *name; 45 char *name;
46 u16 vendor, device; 46 u16 vendor, device;
47 int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq); 47 int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
48 int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); 48 int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq,
49 int new);
49}; 50};
50 51
51struct irq_router_handler { 52struct irq_router_handler {
@@ -77,7 +78,8 @@ static inline struct irq_routing_table *pirq_check_routing_table(u8 *addr)
77 for (i = 0; i < rt->size; i++) 78 for (i = 0; i < rt->size; i++)
78 sum += addr[i]; 79 sum += addr[i];
79 if (!sum) { 80 if (!sum) {
80 DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt); 81 DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n",
82 rt);
81 return rt; 83 return rt;
82 } 84 }
83 return NULL; 85 return NULL;
@@ -183,7 +185,8 @@ static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset,
183 return (nr & 1) ? (x >> 4) : (x & 0xf); 185 return (nr & 1) ? (x >> 4) : (x & 0xf);
184} 186}
185 187
186static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) 188static void write_config_nybble(struct pci_dev *router, unsigned offset,
189 unsigned nr, unsigned int val)
187{ 190{
188 u8 x; 191 u8 x;
189 unsigned reg = offset + (nr >> 1); 192 unsigned reg = offset + (nr >> 1);
@@ -467,7 +470,8 @@ static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int
467 return inb(0xc01) & 0xf; 470 return inb(0xc01) & 0xf;
468} 471}
469 472
470static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) 473static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev,
474 int pirq, int irq)
471{ 475{
472 outb(pirq, 0xc00); 476 outb(pirq, 0xc00);
473 outb(irq, 0xc01); 477 outb(irq, 0xc01);
@@ -660,7 +664,8 @@ static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router
660} 664}
661 665
662 666
663static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) 667static __init int serverworks_router_probe(struct irq_router *r,
668 struct pci_dev *router, u16 device)
664{ 669{
665 switch (device) { 670 switch (device) {
666 case PCI_DEVICE_ID_SERVERWORKS_OSB4: 671 case PCI_DEVICE_ID_SERVERWORKS_OSB4:
@@ -827,10 +832,12 @@ static void __init pirq_find_router(struct irq_router *r)
827 832
828 for (h = pirq_routers; h->vendor; h++) { 833 for (h = pirq_routers; h->vendor; h++) {
829 /* First look for a router match */ 834 /* First look for a router match */
830 if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) 835 if (rt->rtr_vendor == h->vendor &&
836 h->probe(r, pirq_router_dev, rt->rtr_device))
831 break; 837 break;
832 /* Fall back to a device match */ 838 /* Fall back to a device match */
833 if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) 839 if (pirq_router_dev->vendor == h->vendor &&
840 h->probe(r, pirq_router_dev, pirq_router_dev->device))
834 break; 841 break;
835 } 842 }
836 printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", 843 printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
@@ -845,11 +852,13 @@ static void __init pirq_find_router(struct irq_router *r)
845static struct irq_info *pirq_get_info(struct pci_dev *dev) 852static struct irq_info *pirq_get_info(struct pci_dev *dev)
846{ 853{
847 struct irq_routing_table *rt = pirq_table; 854 struct irq_routing_table *rt = pirq_table;
848 int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); 855 int entries = (rt->size - sizeof(struct irq_routing_table)) /
856 sizeof(struct irq_info);
849 struct irq_info *info; 857 struct irq_info *info;
850 858
851 for (info = rt->slots; entries--; info++) 859 for (info = rt->slots; entries--; info++)
852 if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn)) 860 if (info->bus == dev->bus->number &&
861 PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
853 return info; 862 return info;
854 return NULL; 863 return NULL;
855} 864}
@@ -890,7 +899,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
890 DBG(" -> not routed\n" KERN_DEBUG); 899 DBG(" -> not routed\n" KERN_DEBUG);
891 return 0; 900 return 0;
892 } 901 }
893 DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs); 902 DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask,
903 pirq_table->exclusive_irqs);
894 mask &= pcibios_irq_mask; 904 mask &= pcibios_irq_mask;
895 905
896 /* Work around broken HP Pavilion Notebooks which assign USB to 906 /* Work around broken HP Pavilion Notebooks which assign USB to
@@ -903,7 +913,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
903 } 913 }
904 914
905 /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */ 915 /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
906 if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) { 916 if (acer_tm360_irqrouting && dev->irq == 11 &&
917 dev->vendor == PCI_VENDOR_ID_O2) {
907 pirq = 0x68; 918 pirq = 0x68;
908 mask = 0x400; 919 mask = 0x400;
909 dev->irq = r->get(pirq_router_dev, dev, pirq); 920 dev->irq = r->get(pirq_router_dev, dev, pirq);
@@ -920,15 +931,16 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
920 newirq = 0; 931 newirq = 0;
921 else 932 else
922 printk("\n" KERN_WARNING 933 printk("\n" KERN_WARNING
923 "PCI: IRQ %i for device %s doesn't match PIRQ mask " 934 "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n"
924 "- try pci=usepirqmask\n" KERN_DEBUG, newirq, 935 KERN_DEBUG, newirq,
925 pci_name(dev)); 936 pci_name(dev));
926 } 937 }
927 if (!newirq && assign) { 938 if (!newirq && assign) {
928 for (i = 0; i < 16; i++) { 939 for (i = 0; i < 16; i++) {
929 if (!(mask & (1 << i))) 940 if (!(mask & (1 << i)))
930 continue; 941 continue;
931 if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED)) 942 if (pirq_penalty[i] < pirq_penalty[newirq] &&
943 can_request_irq(i, IRQF_SHARED))
932 newirq = i; 944 newirq = i;
933 } 945 }
934 } 946 }
@@ -944,7 +956,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
944 DBG(" -> got IRQ %d\n", irq); 956 DBG(" -> got IRQ %d\n", irq);
945 msg = "Found"; 957 msg = "Found";
946 eisa_set_level_irq(irq); 958 eisa_set_level_irq(irq);
947 } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { 959 } else if (newirq && r->set &&
960 (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
948 DBG(" -> assigning IRQ %d", newirq); 961 DBG(" -> assigning IRQ %d", newirq);
949 if (r->set(pirq_router_dev, dev, pirq, newirq)) { 962 if (r->set(pirq_router_dev, dev, pirq, newirq)) {
950 eisa_set_level_irq(newirq); 963 eisa_set_level_irq(newirq);
@@ -962,7 +975,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
962 } else 975 } else
963 return 0; 976 return 0;
964 } 977 }
965 printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev)); 978 printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq,
979 pci_name(dev));
966 980
967 /* Update IRQ for all devices with the same pirq value */ 981 /* Update IRQ for all devices with the same pirq value */
968 while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { 982 while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
@@ -974,7 +988,10 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
974 if (!info) 988 if (!info)
975 continue; 989 continue;
976 if (info->irq[pin].link == pirq) { 990 if (info->irq[pin].link == pirq) {
977 /* We refuse to override the dev->irq information. Give a warning! */ 991 /*
992 * We refuse to override the dev->irq
993 * information. Give a warning!
994 */
978 if (dev2->irq && dev2->irq != irq && \ 995 if (dev2->irq && dev2->irq != irq && \
979 (!(pci_probe & PCI_USE_PIRQ_MASK) || \ 996 (!(pci_probe & PCI_USE_PIRQ_MASK) || \
980 ((1 << dev2->irq) & mask))) { 997 ((1 << dev2->irq) & mask))) {
@@ -987,7 +1004,9 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
987 dev2->irq = irq; 1004 dev2->irq = irq;
988 pirq_penalty[irq]++; 1005 pirq_penalty[irq]++;
989 if (dev != dev2) 1006 if (dev != dev2)
990 printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2)); 1007 printk(KERN_INFO
1008 "PCI: Sharing IRQ %d with %s\n",
1009 irq, pci_name(dev2));
991 } 1010 }
992 } 1011 }
993 return 1; 1012 return 1;
@@ -1001,15 +1020,21 @@ static void __init pcibios_fixup_irqs(void)
1001 DBG(KERN_DEBUG "PCI: IRQ fixup\n"); 1020 DBG(KERN_DEBUG "PCI: IRQ fixup\n");
1002 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1021 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1003 /* 1022 /*
1004 * If the BIOS has set an out of range IRQ number, just ignore it. 1023 * If the BIOS has set an out of range IRQ number, just
1005 * Also keep track of which IRQ's are already in use. 1024 * ignore it. Also keep track of which IRQ's are
1025 * already in use.
1006 */ 1026 */
1007 if (dev->irq >= 16) { 1027 if (dev->irq >= 16) {
1008 DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq); 1028 DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n",
1029 pci_name(dev), dev->irq);
1009 dev->irq = 0; 1030 dev->irq = 0;
1010 } 1031 }
1011 /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */ 1032 /*
1012 if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000) 1033 * If the IRQ is already assigned to a PCI device,
1034 * ignore its ISA use penalty
1035 */
1036 if (pirq_penalty[dev->irq] >= 100 &&
1037 pirq_penalty[dev->irq] < 100000)
1013 pirq_penalty[dev->irq] = 0; 1038 pirq_penalty[dev->irq] = 0;
1014 pirq_penalty[dev->irq]++; 1039 pirq_penalty[dev->irq]++;
1015 } 1040 }
@@ -1025,8 +1050,13 @@ static void __init pcibios_fixup_irqs(void)
1025 int irq; 1050 int irq;
1026 1051
1027 if (pin) { 1052 if (pin) {
1028 pin--; /* interrupt pins are numbered starting from 1 */ 1053 /*
1029 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin); 1054 * interrupt pins are numbered starting
1055 * from 1
1056 */
1057 pin--;
1058 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
1059 PCI_SLOT(dev->devfn), pin);
1030 /* 1060 /*
1031 * Busses behind bridges are typically not listed in the MP-table. 1061 * Busses behind bridges are typically not listed in the MP-table.
1032 * In this case we have to look up the IRQ based on the parent bus, 1062 * In this case we have to look up the IRQ based on the parent bus,
@@ -1067,7 +1097,8 @@ static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d)
1067{ 1097{
1068 if (!broken_hp_bios_irq9) { 1098 if (!broken_hp_bios_irq9) {
1069 broken_hp_bios_irq9 = 1; 1099 broken_hp_bios_irq9 = 1;
1070 printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); 1100 printk(KERN_INFO "%s detected - fixing broken IRQ routing\n",
1101 d->ident);
1071 } 1102 }
1072 return 0; 1103 return 0;
1073} 1104}
@@ -1080,7 +1111,8 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
1080{ 1111{
1081 if (!acer_tm360_irqrouting) { 1112 if (!acer_tm360_irqrouting) {
1082 acer_tm360_irqrouting = 1; 1113 acer_tm360_irqrouting = 1;
1083 printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); 1114 printk(KERN_INFO "%s detected - fixing broken IRQ routing\n",
1115 d->ident);
1084 } 1116 }
1085 return 0; 1117 return 0;
1086} 1118}
@@ -1092,7 +1124,8 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
1092 .matches = { 1124 .matches = {
1093 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1125 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1094 DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"), 1126 DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
1095 DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"), 1127 DMI_MATCH(DMI_PRODUCT_VERSION,
1128 "HP Pavilion Notebook Model GE"),
1096 DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), 1129 DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
1097 }, 1130 },
1098 }, 1131 },
@@ -1131,7 +1164,10 @@ int __init pcibios_irq_init(void)
1131 if (!(pirq_table->exclusive_irqs & (1 << i))) 1164 if (!(pirq_table->exclusive_irqs & (1 << i)))
1132 pirq_penalty[i] += 100; 1165 pirq_penalty[i] += 100;
1133 } 1166 }
1134 /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */ 1167 /*
1168 * If we're using the I/O APIC, avoid using the PCI IRQ
1169 * routing table
1170 */
1135 if (io_apic_assign_pci_irqs) 1171 if (io_apic_assign_pci_irqs)
1136 pirq_table = NULL; 1172 pirq_table = NULL;
1137 } 1173 }
@@ -1175,7 +1211,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
1175 if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { 1211 if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
1176 char *msg = ""; 1212 char *msg = "";
1177 1213
1178 pin--; /* interrupt pins are numbered starting from 1 */ 1214 pin--; /* interrupt pins are numbered starting from 1 */
1179 1215
1180 if (io_apic_assign_pci_irqs) { 1216 if (io_apic_assign_pci_irqs) {
1181 int irq; 1217 int irq;
@@ -1195,13 +1231,16 @@ static int pirq_enable_irq(struct pci_dev *dev)
1195 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1231 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
1196 PCI_SLOT(bridge->devfn), pin); 1232 PCI_SLOT(bridge->devfn), pin);
1197 if (irq >= 0) 1233 if (irq >= 0)
1198 printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n", 1234 printk(KERN_WARNING
1199 pci_name(bridge), 'A' + pin, irq); 1235 "PCI: using PPB %s[%c] to get irq %d\n",
1236 pci_name(bridge),
1237 'A' + pin, irq);
1200 dev = bridge; 1238 dev = bridge;
1201 } 1239 }
1202 dev = temp_dev; 1240 dev = temp_dev;
1203 if (irq >= 0) { 1241 if (irq >= 0) {
1204 printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", 1242 printk(KERN_INFO
1243 "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
1205 pci_name(dev), 'A' + pin, irq); 1244 pci_name(dev), 'A' + pin, irq);
1206 dev->irq = irq; 1245 dev->irq = irq;
1207 return 0; 1246 return 0;
@@ -1212,12 +1251,17 @@ static int pirq_enable_irq(struct pci_dev *dev)
1212 else 1251 else
1213 msg = " Please try using pci=biosirq."; 1252 msg = " Please try using pci=biosirq.";
1214 1253
1215 /* With IDE legacy devices the IRQ lookup failure is not a problem.. */ 1254 /*
1216 if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) 1255 * With IDE legacy devices the IRQ lookup failure is not
1256 * a problem..
1257 */
1258 if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE &&
1259 !(dev->class & 0x5))
1217 return 0; 1260 return 0;
1218 1261
1219 printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", 1262 printk(KERN_WARNING
1220 'A' + pin, pci_name(dev), msg); 1263 "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
1264 'A' + pin, pci_name(dev), msg);
1221 } 1265 }
1222 return 0; 1266 return 0;
1223} 1267}
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
index b2270a55b0cf..3e25deb821ac 100644
--- a/arch/x86/pci/pci.h
+++ b/arch/x86/pci/pci.h
@@ -28,6 +28,7 @@
28#define PCI_USE__CRS 0x10000 28#define PCI_USE__CRS 0x10000
29#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 29#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000
30#define PCI_HAS_IO_ECS 0x40000 30#define PCI_HAS_IO_ECS 0x40000
31#define PCI_NOASSIGN_ROMS 0x80000
31 32
32extern unsigned int pci_probe; 33extern unsigned int pci_probe;
33extern unsigned long pirq_table_addr; 34extern unsigned long pirq_table_addr;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bba867391a85..735f5ea17473 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -336,6 +336,15 @@ config ACPI_EC
336 the battery and thermal drivers. If you are compiling for a 336 the battery and thermal drivers. If you are compiling for a
337 mobile system, say Y. 337 mobile system, say Y.
338 338
339config ACPI_PCI_SLOT
340 tristate "PCI slot detection driver"
341 default n
342 help
343 This driver will attempt to discover all PCI slots in your system,
344 and creates entries in /sys/bus/pci/slots/. This feature can
345 help you correlate PCI bus addresses with the physical geography
346 of your slots. If you are unsure, say N.
347
339config ACPI_POWER 348config ACPI_POWER
340 bool 349 bool
341 default y 350 default y
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 4efbe598c817..52a4cd4b81d0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_ACPI_DOCK) += dock.o
48obj-$(CONFIG_ACPI_BAY) += bay.o 48obj-$(CONFIG_ACPI_BAY) += bay.o
49obj-$(CONFIG_ACPI_VIDEO) += video.o 49obj-$(CONFIG_ACPI_VIDEO) += video.o
50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
51obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
51obj-$(CONFIG_ACPI_POWER) += power.o 52obj-$(CONFIG_ACPI_POWER) += power.o
52obj-$(CONFIG_ACPI_PROCESSOR) += processor.o 53obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
53obj-$(CONFIG_ACPI_CONTAINER) += container.o 54obj-$(CONFIG_ACPI_CONTAINER) += container.o
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index afb34387d5f2..ccae305ee55d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -295,6 +295,28 @@ int acpi_bus_set_power(acpi_handle handle, int state)
295 295
296EXPORT_SYMBOL(acpi_bus_set_power); 296EXPORT_SYMBOL(acpi_bus_set_power);
297 297
298bool acpi_bus_power_manageable(acpi_handle handle)
299{
300 struct acpi_device *device;
301 int result;
302
303 result = acpi_bus_get_device(handle, &device);
304 return result ? false : device->flags.power_manageable;
305}
306
307EXPORT_SYMBOL(acpi_bus_power_manageable);
308
309bool acpi_bus_can_wakeup(acpi_handle handle)
310{
311 struct acpi_device *device;
312 int result;
313
314 result = acpi_bus_get_device(handle, &device);
315 return result ? false : device->wakeup.flags.valid;
316}
317
318EXPORT_SYMBOL(acpi_bus_can_wakeup);
319
298/* -------------------------------------------------------------------------- 320/* --------------------------------------------------------------------------
299 Event Management 321 Event Management
300 -------------------------------------------------------------------------- */ 322 -------------------------------------------------------------------------- */
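An illustrative sketch only (not from this patch; the example_* name is hypothetical): how a caller might combine the two predicates introduced above with the existing acpi_bus_set_power().

#include <linux/kernel.h>
#include <linux/errno.h>
#include <acpi/acpi_bus.h>

/*
 * Hypothetical caller: only drive power states for devices that ACPI
 * reports as power manageable, and note whether they can wake the
 * system before powering them down.
 */
static int example_acpi_power_off(acpi_handle handle)
{
	if (!acpi_bus_power_manageable(handle))
		return -ENODEV;

	if (acpi_bus_can_wakeup(handle))
		printk(KERN_DEBUG "ACPI: device is wake-capable\n");

	return acpi_bus_set_power(handle, ACPI_STATE_D3);
}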
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 6d18ca34b6aa..0f2dd81736bd 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -166,6 +166,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
166 "firmware_node"); 166 "firmware_node");
167 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, 167 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
168 "physical_node"); 168 "physical_node");
169 if (acpi_dev->wakeup.flags.valid)
170 device_set_wakeup_capable(dev, true);
169 } 171 }
170 172
171 return 0; 173 return 0;
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
new file mode 100644
index 000000000000..b9ab030a52d5
--- /dev/null
+++ b/drivers/acpi/pci_slot.c
@@ -0,0 +1,368 @@
1/*
2 * pci_slot.c - ACPI PCI Slot Driver
3 *
4 * The code here is heavily leveraged from the acpiphp module.
5 * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance.
6 * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code
7 * review and fixes.
8 *
9 * Copyright (C) 2007 Alex Chiang <achiang@hp.com>
10 * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/types.h>
30#include <linux/pci.h>
31#include <linux/acpi.h>
32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h>
34
35static int debug;
36static int check_sta_before_sun;
37
38#define DRIVER_VERSION "0.1"
39#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>"
40#define DRIVER_DESC "ACPI PCI Slot Detection Driver"
41MODULE_AUTHOR(DRIVER_AUTHOR);
42MODULE_DESCRIPTION(DRIVER_DESC);
43MODULE_LICENSE("GPL");
44MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
45module_param(debug, bool, 0644);
46
47#define _COMPONENT ACPI_PCI_COMPONENT
48ACPI_MODULE_NAME("pci_slot");
49
50#define MY_NAME "pci_slot"
51#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
52#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
53#define dbg(format, arg...) \
54 do { \
55 if (debug) \
56 printk(KERN_DEBUG "%s: " format, \
57 MY_NAME , ## arg); \
58 } while (0)
59
60#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */
61
62struct acpi_pci_slot {
63 acpi_handle root_handle; /* handle of the root bridge */
64 struct pci_slot *pci_slot; /* corresponding pci_slot */
65 struct list_head list; /* node in the list of slots */
66};
67
68static int acpi_pci_slot_add(acpi_handle handle);
69static void acpi_pci_slot_remove(acpi_handle handle);
70
71static LIST_HEAD(slot_list);
72static DEFINE_MUTEX(slot_list_lock);
73static struct acpi_pci_driver acpi_pci_slot_driver = {
74 .add = acpi_pci_slot_add,
75 .remove = acpi_pci_slot_remove,
76};
77
78static int
79check_slot(acpi_handle handle, int *device, unsigned long *sun)
80{
81 int retval = 0;
82 unsigned long adr, sta;
83 acpi_status status;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
85
86 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
87 dbg("Checking slot on path: %s\n", (char *)buffer.pointer);
88
89 if (check_sta_before_sun) {
90 /* If SxFy doesn't have _STA, we just assume it's there */
91 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
92 if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
93 retval = -1;
94 goto out;
95 }
96 }
97
98 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
99 if (ACPI_FAILURE(status)) {
100 dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer);
101 retval = -1;
102 goto out;
103 }
104
105 *device = (adr >> 16) & 0xffff;
106
107 /* No _SUN == not a slot == bail */
108 status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
109 if (ACPI_FAILURE(status)) {
110 dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer);
111 retval = -1;
112 goto out;
113 }
114
115out:
116 kfree(buffer.pointer);
117 return retval;
118}
119
120struct callback_args {
121 acpi_walk_callback user_function; /* only for walk_p2p_bridge */
122 struct pci_bus *pci_bus;
123 acpi_handle root_handle;
124};
125
126/*
127 * register_slot
128 *
129 * Called once for each SxFy object in the namespace. Don't worry about
130 * calling pci_create_slot multiple times for the same pci_bus:device,
131 * since each subsequent call simply bumps the refcount on the pci_slot.
132 *
133 * The number of calls to pci_destroy_slot from unregister_slot is
134 * symmetrical.
135 */
136static acpi_status
137register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
138{
139 int device;
140 unsigned long sun;
141 char name[SLOT_NAME_SIZE];
142 struct acpi_pci_slot *slot;
143 struct pci_slot *pci_slot;
144 struct callback_args *parent_context = context;
145 struct pci_bus *pci_bus = parent_context->pci_bus;
146
147 if (check_slot(handle, &device, &sun))
148 return AE_OK;
149
150 slot = kmalloc(sizeof(*slot), GFP_KERNEL);
151 if (!slot) {
152 err("%s: cannot allocate memory\n", __func__);
153 return AE_OK;
154 }
155
156 snprintf(name, sizeof(name), "%u", (u32)sun);
157 pci_slot = pci_create_slot(pci_bus, device, name);
158 if (IS_ERR(pci_slot)) {
159 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
160 kfree(slot);
161 }
162
163 slot->root_handle = parent_context->root_handle;
164 slot->pci_slot = pci_slot;
165 INIT_LIST_HEAD(&slot->list);
166 mutex_lock(&slot_list_lock);
167 list_add(&slot->list, &slot_list);
168 mutex_unlock(&slot_list_lock);
169
170 dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n",
171 pci_slot, pci_bus->number, device, name);
172
173 return AE_OK;
174}
175
176/*
177 * walk_p2p_bridge - discover and walk p2p bridges
178 * @handle: points to an acpi_pci_root
179 * @context: p2p_bridge_context pointer
180 *
181 * Note that when we call ourselves recursively, we pass a different
182 * value of pci_bus in the child_context.
183 */
184static acpi_status
185walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
186{
187 int device, function;
188 unsigned long adr;
189 acpi_status status;
190 acpi_handle dummy_handle;
191 acpi_walk_callback user_function;
192
193 struct pci_dev *dev;
194 struct pci_bus *pci_bus;
195 struct callback_args child_context;
196 struct callback_args *parent_context = context;
197
198 pci_bus = parent_context->pci_bus;
199 user_function = parent_context->user_function;
200
201 status = acpi_get_handle(handle, "_ADR", &dummy_handle);
202 if (ACPI_FAILURE(status))
203 return AE_OK;
204
205 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
206 if (ACPI_FAILURE(status))
207 return AE_OK;
208
209 device = (adr >> 16) & 0xffff;
210 function = adr & 0xffff;
211
212 dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
213 if (!dev || !dev->subordinate)
214 goto out;
215
216 child_context.pci_bus = dev->subordinate;
217 child_context.user_function = user_function;
218 child_context.root_handle = parent_context->root_handle;
219
220 dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number);
221 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
222 user_function, &child_context, NULL);
223 if (ACPI_FAILURE(status))
224 goto out;
225
226 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
227 walk_p2p_bridge, &child_context, NULL);
228out:
229 pci_dev_put(dev);
230 return AE_OK;
231}
232
233/*
234 * walk_root_bridge - generic root bridge walker
235 * @handle: points to an acpi_pci_root
236 * @user_function: user callback for slot objects
237 *
238 * Call user_function for all objects underneath this root bridge.
239 * Walk p2p bridges underneath us and call user_function on those too.
240 */
241static int
242walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function)
243{
244 int seg, bus;
245 unsigned long tmp;
246 acpi_status status;
247 acpi_handle dummy_handle;
248 struct pci_bus *pci_bus;
249 struct callback_args context;
250
251 /* If the bridge doesn't have _STA, we assume it is always there */
252 status = acpi_get_handle(handle, "_STA", &dummy_handle);
253 if (ACPI_SUCCESS(status)) {
254 status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
255 if (ACPI_FAILURE(status)) {
256 info("%s: _STA evaluation failure\n", __func__);
257 return 0;
258 }
259 if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0)
260 /* don't register this object */
261 return 0;
262 }
263
264 status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
265 seg = ACPI_SUCCESS(status) ? tmp : 0;
266
267 status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
268 bus = ACPI_SUCCESS(status) ? tmp : 0;
269
270 pci_bus = pci_find_bus(seg, bus);
271 if (!pci_bus)
272 return 0;
273
274 context.pci_bus = pci_bus;
275 context.user_function = user_function;
276 context.root_handle = handle;
277
278 dbg("root bridge walk, pci_bus = %x\n", pci_bus->number);
279 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
280 user_function, &context, NULL);
281 if (ACPI_FAILURE(status))
282 return status;
283
284 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
285 walk_p2p_bridge, &context, NULL);
286 if (ACPI_FAILURE(status))
287 err("%s: walk_p2p_bridge failure - %d\n", __func__, status);
288
289 return status;
290}
291
292/*
293 * acpi_pci_slot_add
294 * @handle: points to an acpi_pci_root
295 */
296static int
297acpi_pci_slot_add(acpi_handle handle)
298{
299 acpi_status status;
300
301 status = walk_root_bridge(handle, register_slot);
302 if (ACPI_FAILURE(status))
303 err("%s: register_slot failure - %d\n", __func__, status);
304
305 return status;
306}
307
308/*
309 * acpi_pci_slot_remove
310 * @handle: points to an acpi_pci_root
311 */
312static void
313acpi_pci_slot_remove(acpi_handle handle)
314{
315 struct acpi_pci_slot *slot, *tmp;
316
317 mutex_lock(&slot_list_lock);
318 list_for_each_entry_safe(slot, tmp, &slot_list, list) {
319 if (slot->root_handle == handle) {
320 list_del(&slot->list);
321 pci_destroy_slot(slot->pci_slot);
322 kfree(slot);
323 }
324 }
325 mutex_unlock(&slot_list_lock);
326}
327
328static int do_sta_before_sun(const struct dmi_system_id *d)
329{
330 info("%s detected: will evaluate _STA before calling _SUN\n", d->ident);
331 check_sta_before_sun = 1;
332 return 0;
333}
334
335static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
336 /*
337 * Fujitsu Primequest machines will return 1023 to indicate an
338 * error if the _SUN method is evaluated on SxFy objects that
339 * are not present (as indicated by _STA), so for those machines,
340 * we want to check _STA before evaluating _SUN.
341 */
342 {
343 .callback = do_sta_before_sun,
344 .ident = "Fujitsu PRIMEQUEST",
345 .matches = {
346 DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"),
347 DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"),
348 },
349 },
350 {}
351};
352
353static int __init
354acpi_pci_slot_init(void)
355{
356 dmi_check_system(acpi_pci_slot_dmi_table);
357 acpi_pci_register_driver(&acpi_pci_slot_driver);
358 return 0;
359}
360
361static void __exit
362acpi_pci_slot_exit(void)
363{
364 acpi_pci_unregister_driver(&acpi_pci_slot_driver);
365}
366
367module_init(acpi_pci_slot_init);
368module_exit(acpi_pci_slot_exit);
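
For context, acpi_pci_register_driver() above takes the acpi_pci_slot_driver object that is defined earlier in pci_slot.c and not shown in this excerpt. As a hedged illustration only, the hook is assumed to use the .add/.remove callbacks of the era's struct acpi_pci_driver:

/* Sketch only: the real definition lives earlier in pci_slot.c; the
 * .add/.remove members are assumed from struct acpi_pci_driver. */
static struct acpi_pci_driver acpi_pci_slot_driver = {
	.add = acpi_pci_slot_add,
	.remove = acpi_pci_slot_remove,
};
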
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 81e4f081a4ae..4ab21cb1c8c7 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -292,69 +292,135 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
292 return 0; 292 return 0;
293} 293}
294 294
295/**
296 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
297 * ACPI 3.0) _PSW (Power State Wake)
298 * @dev: Device to handle.
299 * @enable: 0 - disable, 1 - enable the wake capabilities of the device.
300 * @sleep_state: Target sleep state of the system.
301 * @dev_state: Target power state of the device.
302 *
303 * Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
304 * State Wake) for the device, if present. On failure reset the device's
305 * wakeup.flags.valid flag.
306 *
307 * RETURN VALUE:
308 * 0 if either _DSW or _PSW has been successfully executed
309 * 0 if neither _DSW nor _PSW has been found
310 * -ENODEV if the execution of either _DSW or _PSW has failed
311 */
312int acpi_device_sleep_wake(struct acpi_device *dev,
313 int enable, int sleep_state, int dev_state)
314{
315 union acpi_object in_arg[3];
316 struct acpi_object_list arg_list = { 3, in_arg };
317 acpi_status status = AE_OK;
318
319 /*
320 * Try to execute _DSW first.
321 *
322 * Three arguments are needed for the _DSW object:
323 * Argument 0: enable/disable the wake capabilities
324 * Argument 1: target system state
325 * Argument 2: target device state
326 * When the _DSW object is called to disable the wake capabilities, only
327 * the first argument needs to be filled in; the values of the other two
328 * arguments are meaningless.
329 */
330 in_arg[0].type = ACPI_TYPE_INTEGER;
331 in_arg[0].integer.value = enable;
332 in_arg[1].type = ACPI_TYPE_INTEGER;
333 in_arg[1].integer.value = sleep_state;
334 in_arg[2].type = ACPI_TYPE_INTEGER;
335 in_arg[2].integer.value = dev_state;
336 status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL);
337 if (ACPI_SUCCESS(status)) {
338 return 0;
339 } else if (status != AE_NOT_FOUND) {
340 printk(KERN_ERR PREFIX "_DSW execution failed\n");
341 dev->wakeup.flags.valid = 0;
342 return -ENODEV;
343 }
344
345 /* Execute _PSW */
346 arg_list.count = 1;
347 in_arg[0].integer.value = enable;
348 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
349 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
350 printk(KERN_ERR PREFIX "_PSW execution failed\n");
351 dev->wakeup.flags.valid = 0;
352 return -ENODEV;
353 }
354
355 return 0;
356}
357
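
To make the calling convention above concrete (enable flag, target system state, target device state), here is a minimal, hypothetical caller; the S3/D3 values are illustrative and not taken from this patch:

/* Hypothetical caller: arm wake-up before entering S3 and allow the
 * device to drop to D3; on failure wakeup.flags.valid is cleared by
 * acpi_device_sleep_wake() itself. */
static int example_arm_wake(struct acpi_device *adev)
{
	return acpi_device_sleep_wake(adev, 1, ACPI_STATE_S3, ACPI_STATE_D3);
}
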
295/* 358/*
296 * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): 359 * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
297 * 1. Power on the power resources required for the wakeup device 360 * 1. Power on the power resources required for the wakeup device
298 * 2. Enable _PSW (power state wake) for the device if present 361 * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
362 * State Wake) for the device, if present
299 */ 363 */
300int acpi_enable_wakeup_device_power(struct acpi_device *dev) 364int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
301{ 365{
302 union acpi_object arg = { ACPI_TYPE_INTEGER }; 366 int i, err;
303 struct acpi_object_list arg_list = { 1, &arg };
304 acpi_status status = AE_OK;
305 int i;
306 int ret = 0;
307 367
308 if (!dev || !dev->wakeup.flags.valid) 368 if (!dev || !dev->wakeup.flags.valid)
309 return -1; 369 return -EINVAL;
370
371 /*
372 * Do not execute the code below twice in a row without calling
373 * acpi_disable_wakeup_device_power() in between for the same device
374 */
375 if (dev->wakeup.flags.prepared)
376 return 0;
310 377
311 arg.integer.value = 1;
312 /* Open power resource */ 378 /* Open power resource */
313 for (i = 0; i < dev->wakeup.resources.count; i++) { 379 for (i = 0; i < dev->wakeup.resources.count; i++) {
314 ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); 380 int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
315 if (ret) { 381 if (ret) {
316 printk(KERN_ERR PREFIX "Transition power state\n"); 382 printk(KERN_ERR PREFIX "Transition power state\n");
317 dev->wakeup.flags.valid = 0; 383 dev->wakeup.flags.valid = 0;
318 return -1; 384 return -ENODEV;
319 } 385 }
320 } 386 }
321 387
322 /* Execute PSW */ 388 /*
323 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); 389 * Passing 3 as the third argument below means the device may be placed
324 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { 390 * in an arbitrary power state afterwards.
325 printk(KERN_ERR PREFIX "Evaluate _PSW\n"); 391 */
326 dev->wakeup.flags.valid = 0; 392 err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
327 ret = -1; 393 if (!err)
328 } 394 dev->wakeup.flags.prepared = 1;
329 395
330 return ret; 396 return err;
331} 397}
332 398
333/* 399/*
334 * Shutdown a wakeup device, counterpart of above method 400 * Shutdown a wakeup device, counterpart of above method
335 * 1. Disable _PSW (power state wake) 401 * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
402 * State Wake) for the device, if present
336 * 2. Shut down the power resources 403 * 2. Shut down the power resources
337 */ 404 */
338int acpi_disable_wakeup_device_power(struct acpi_device *dev) 405int acpi_disable_wakeup_device_power(struct acpi_device *dev)
339{ 406{
340 union acpi_object arg = { ACPI_TYPE_INTEGER }; 407 int i, ret;
341 struct acpi_object_list arg_list = { 1, &arg };
342 acpi_status status = AE_OK;
343 int i;
344 int ret = 0;
345
346 408
347 if (!dev || !dev->wakeup.flags.valid) 409 if (!dev || !dev->wakeup.flags.valid)
348 return -1; 410 return -EINVAL;
349 411
350 arg.integer.value = 0; 412 /*
351 /* Execute PSW */ 413 * Do not execute the code below twice in a row without calling
352 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); 414 * acpi_enable_wakeup_device_power() in between for the same device
353 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { 415 */
354 printk(KERN_ERR PREFIX "Evaluate _PSW\n"); 416 if (!dev->wakeup.flags.prepared)
355 dev->wakeup.flags.valid = 0; 417 return 0;
356 return -1; 418
357 } 419 dev->wakeup.flags.prepared = 0;
420
421 ret = acpi_device_sleep_wake(dev, 0, 0, 0);
422 if (ret)
423 return ret;
358 424
359 /* Close power resource */ 425 /* Close power resource */
360 for (i = 0; i < dev->wakeup.resources.count; i++) { 426 for (i = 0; i < dev->wakeup.resources.count; i++) {
@@ -362,7 +428,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
362 if (ret) { 428 if (ret) {
363 printk(KERN_ERR PREFIX "Transition power state\n"); 429 printk(KERN_ERR PREFIX "Transition power state\n");
364 dev->wakeup.flags.valid = 0; 430 dev->wakeup.flags.valid = 0;
365 return -1; 431 return -ENODEV;
366 } 432 }
367 } 433 }
368 434
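
The new 'prepared' flag makes the enable/disable pair idempotent: enabling twice, or disabling without a prior enable, is a no-op. A sketch of the expected call pattern, with the device pointer and sleep state as placeholders:

/* Hypothetical suspend-path usage of the reworked wakeup helpers. */
static void example_wakeup_cycle(struct acpi_device *adev)
{
	if (acpi_enable_wakeup_device_power(adev, ACPI_STATE_S3))
		return;		/* power-on or _DSW/_PSW failed */

	/* ... the system sleeps and wakes up ... */

	acpi_disable_wakeup_device_power(adev);	/* runs _DSW/_PSW, closes resources */
}
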
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5b049cd79553..f3132aa47a69 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -703,9 +703,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
703 acpi_status status = 0; 703 acpi_status status = 0;
704 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 704 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
705 union acpi_object *package = NULL; 705 union acpi_object *package = NULL;
706 union acpi_object in_arg[3]; 706 int psw_error;
707 struct acpi_object_list arg_list = { 3, in_arg };
708 acpi_status psw_status = AE_OK;
709 707
710 struct acpi_device_id button_device_ids[] = { 708 struct acpi_device_id button_device_ids[] = {
711 {"PNP0C0D", 0}, 709 {"PNP0C0D", 0},
@@ -737,39 +735,11 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
737 * So it is necessary to call the _DSW object first. Only when it is not 735 * So it is necessary to call the _DSW object first. Only when it is not
738 * present will the _PSW object be used. 736 * present will the _PSW object be used.
739 */ 737 */
740 /* 738 psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
741 * Three agruments are needed for the _DSW object. 739 if (psw_error)
742 * Argument 0: enable/disable the wake capabilities 740 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
743 * When _DSW object is called to disable the wake capabilities, maybe 741 "error in _DSW or _PSW evaluation\n"));
744 * the first argument is filled. The value of the other two agruments 742
745 * is meaningless.
746 */
747 in_arg[0].type = ACPI_TYPE_INTEGER;
748 in_arg[0].integer.value = 0;
749 in_arg[1].type = ACPI_TYPE_INTEGER;
750 in_arg[1].integer.value = 0;
751 in_arg[2].type = ACPI_TYPE_INTEGER;
752 in_arg[2].integer.value = 0;
753 psw_status = acpi_evaluate_object(device->handle, "_DSW",
754 &arg_list, NULL);
755 if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
756 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n"));
757 /*
758 * When the _DSW object is not present, OSPM will call _PSW object.
759 */
760 if (psw_status == AE_NOT_FOUND) {
761 /*
762 * Only one agruments is required for the _PSW object.
763 * agrument 0: enable/disable the wake capabilities
764 */
765 arg_list.count = 1;
766 in_arg[0].integer.value = 0;
767 psw_status = acpi_evaluate_object(device->handle, "_PSW",
768 &arg_list, NULL);
769 if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
770 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in "
771 "evaluate _PSW\n"));
772 }
773 /* Power button, Lid switch always enable wakeup */ 743 /* Power button, Lid switch always enable wakeup */
774 if (!acpi_match_device_ids(device, button_device_ids)) 744 if (!acpi_match_device_ids(device, button_device_ids))
775 device->wakeup.flags.run_wake = 1; 745 device->wakeup.flags.run_wake = 1;
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 244e352f7661..0489a7d1d42c 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -24,10 +24,6 @@
24 24
25u8 sleep_states[ACPI_S_STATE_COUNT]; 25u8 sleep_states[ACPI_S_STATE_COUNT];
26 26
27#ifdef CONFIG_PM_SLEEP
28static u32 acpi_target_sleep_state = ACPI_STATE_S0;
29#endif
30
31static int acpi_sleep_prepare(u32 acpi_state) 27static int acpi_sleep_prepare(u32 acpi_state)
32{ 28{
33#ifdef CONFIG_ACPI_SLEEP 29#ifdef CONFIG_ACPI_SLEEP
@@ -49,9 +45,96 @@ static int acpi_sleep_prepare(u32 acpi_state)
49 return 0; 45 return 0;
50} 46}
51 47
52#ifdef CONFIG_SUSPEND 48#ifdef CONFIG_PM_SLEEP
53static struct platform_suspend_ops acpi_suspend_ops; 49static u32 acpi_target_sleep_state = ACPI_STATE_S0;
50
51/*
52 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
53 * user to request that behavior by using the 'acpi_old_suspend_ordering'
54 * kernel command line option that causes the following variable to be set.
55 */
56static bool old_suspend_ordering;
57
58void __init acpi_old_suspend_ordering(void)
59{
60 old_suspend_ordering = true;
61}
62
63/**
64 * acpi_pm_disable_gpes - Disable the GPEs.
65 */
66static int acpi_pm_disable_gpes(void)
67{
68 acpi_hw_disable_all_gpes();
69 return 0;
70}
71
72/**
73 * __acpi_pm_prepare - Prepare the platform to enter the target state.
74 *
75 * If necessary, set the firmware waking vector and do arch-specific
76 * nastiness to get the wakeup code to the waking vector.
77 */
78static int __acpi_pm_prepare(void)
79{
80 int error = acpi_sleep_prepare(acpi_target_sleep_state);
81
82 if (error)
83 acpi_target_sleep_state = ACPI_STATE_S0;
84 return error;
85}
86
87/**
88 * acpi_pm_prepare - Prepare the platform to enter the target sleep
89 * state and disable the GPEs.
90 */
91static int acpi_pm_prepare(void)
92{
93 int error = __acpi_pm_prepare();
94
95 if (!error)
96 acpi_hw_disable_all_gpes();
97 return error;
98}
99
100/**
101 * acpi_pm_finish - Instruct the platform to leave a sleep state.
102 *
103 * This is called after we wake back up (or if entering the sleep state
104 * failed).
105 */
106static void acpi_pm_finish(void)
107{
108 u32 acpi_state = acpi_target_sleep_state;
109
110 if (acpi_state == ACPI_STATE_S0)
111 return;
112
113 printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
114 acpi_state);
115 acpi_disable_wakeup_device(acpi_state);
116 acpi_leave_sleep_state(acpi_state);
117
118 /* reset firmware waking vector */
119 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
120
121 acpi_target_sleep_state = ACPI_STATE_S0;
122}
123
124/**
125 * acpi_pm_end - Finish up suspend sequence.
126 */
127static void acpi_pm_end(void)
128{
129 /*
130 * This is necessary in case acpi_pm_finish() is not called during a
131 * failing transition to a sleep state.
132 */
133 acpi_target_sleep_state = ACPI_STATE_S0;
134}
135#endif /* CONFIG_PM_SLEEP */
54 136
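
The comment above refers to an 'acpi_old_suspend_ordering' command line option; the parsing hook is not part of this hunk, so the wiring below is only an assumed sketch of how such an option would end up calling acpi_old_suspend_ordering():

/* Assumed wiring, not taken from this diff. */
static int __init init_old_suspend_ordering(char *str)
{
	acpi_old_suspend_ordering();
	return 1;
}
__setup("acpi_old_suspend_ordering", init_old_suspend_ordering);
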
137#ifdef CONFIG_SUSPEND
55extern void do_suspend_lowlevel(void); 138extern void do_suspend_lowlevel(void);
56 139
57static u32 acpi_suspend_states[] = { 140static u32 acpi_suspend_states[] = {
@@ -65,7 +148,6 @@ static u32 acpi_suspend_states[] = {
65 * acpi_suspend_begin - Set the target system sleep state to the state 148 * acpi_suspend_begin - Set the target system sleep state to the state
66 * associated with given @pm_state, if supported. 149 * associated with given @pm_state, if supported.
67 */ 150 */
68
69static int acpi_suspend_begin(suspend_state_t pm_state) 151static int acpi_suspend_begin(suspend_state_t pm_state)
70{ 152{
71 u32 acpi_state = acpi_suspend_states[pm_state]; 153 u32 acpi_state = acpi_suspend_states[pm_state];
@@ -82,25 +164,6 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
82} 164}
83 165
84/** 166/**
85 * acpi_suspend_prepare - Do preliminary suspend work.
86 *
87 * If necessary, set the firmware waking vector and do arch-specific
88 * nastiness to get the wakeup code to the waking vector.
89 */
90
91static int acpi_suspend_prepare(void)
92{
93 int error = acpi_sleep_prepare(acpi_target_sleep_state);
94
95 if (error) {
96 acpi_target_sleep_state = ACPI_STATE_S0;
97 return error;
98 }
99
100 return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT;
101}
102
103/**
104 * acpi_suspend_enter - Actually enter a sleep state. 167 * acpi_suspend_enter - Actually enter a sleep state.
105 * @pm_state: ignored 168 * @pm_state: ignored
106 * 169 *
@@ -108,7 +171,6 @@ static int acpi_suspend_prepare(void)
108 * assembly, which in turn call acpi_enter_sleep_state(). 171 * assembly, which in turn call acpi_enter_sleep_state().
109 * It's unfortunate, but it works. Please fix if you're feeling frisky. 172 * It's unfortunate, but it works. Please fix if you're feeling frisky.
110 */ 173 */
111
112static int acpi_suspend_enter(suspend_state_t pm_state) 174static int acpi_suspend_enter(suspend_state_t pm_state)
113{ 175{
114 acpi_status status = AE_OK; 176 acpi_status status = AE_OK;
@@ -165,39 +227,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
165 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 227 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
166} 228}
167 229
168/**
169 * acpi_suspend_finish - Instruct the platform to leave a sleep state.
170 *
171 * This is called after we wake back up (or if entering the sleep state
172 * failed).
173 */
174
175static void acpi_suspend_finish(void)
176{
177 u32 acpi_state = acpi_target_sleep_state;
178
179 acpi_disable_wakeup_device(acpi_state);
180 acpi_leave_sleep_state(acpi_state);
181
182 /* reset firmware waking vector */
183 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
184
185 acpi_target_sleep_state = ACPI_STATE_S0;
186}
187
188/**
189 * acpi_suspend_end - Finish up suspend sequence.
190 */
191
192static void acpi_suspend_end(void)
193{
194 /*
195 * This is necessary in case acpi_suspend_finish() is not called during a
196 * failing transition to a sleep state.
197 */
198 acpi_target_sleep_state = ACPI_STATE_S0;
199}
200
201static int acpi_suspend_state_valid(suspend_state_t pm_state) 230static int acpi_suspend_state_valid(suspend_state_t pm_state)
202{ 231{
203 u32 acpi_state; 232 u32 acpi_state;
@@ -217,10 +246,39 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
217static struct platform_suspend_ops acpi_suspend_ops = { 246static struct platform_suspend_ops acpi_suspend_ops = {
218 .valid = acpi_suspend_state_valid, 247 .valid = acpi_suspend_state_valid,
219 .begin = acpi_suspend_begin, 248 .begin = acpi_suspend_begin,
220 .prepare = acpi_suspend_prepare, 249 .prepare = acpi_pm_prepare,
221 .enter = acpi_suspend_enter, 250 .enter = acpi_suspend_enter,
222 .finish = acpi_suspend_finish, 251 .finish = acpi_pm_finish,
223 .end = acpi_suspend_end, 252 .end = acpi_pm_end,
253};
254
255/**
256 * acpi_suspend_begin_old - Set the target system sleep state to the
257 * state associated with given @pm_state, if supported, and
258 * execute the _PTS control method. This function is used if the
259 * pre-ACPI 2.0 suspend ordering has been requested.
260 */
261static int acpi_suspend_begin_old(suspend_state_t pm_state)
262{
263 int error = acpi_suspend_begin(pm_state);
264
265 if (!error)
266 error = __acpi_pm_prepare();
267 return error;
268}
269
270/*
271 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
272 * been requested.
273 */
274static struct platform_suspend_ops acpi_suspend_ops_old = {
275 .valid = acpi_suspend_state_valid,
276 .begin = acpi_suspend_begin_old,
277 .prepare = acpi_pm_disable_gpes,
278 .enter = acpi_suspend_enter,
279 .finish = acpi_pm_finish,
280 .end = acpi_pm_end,
281 .recover = acpi_pm_finish,
224}; 282};
225#endif /* CONFIG_SUSPEND */ 283#endif /* CONFIG_SUSPEND */
226 284
@@ -228,22 +286,9 @@ static struct platform_suspend_ops acpi_suspend_ops = {
228static int acpi_hibernation_begin(void) 286static int acpi_hibernation_begin(void)
229{ 287{
230 acpi_target_sleep_state = ACPI_STATE_S4; 288 acpi_target_sleep_state = ACPI_STATE_S4;
231
232 return 0; 289 return 0;
233} 290}
234 291
235static int acpi_hibernation_prepare(void)
236{
237 int error = acpi_sleep_prepare(ACPI_STATE_S4);
238
239 if (error) {
240 acpi_target_sleep_state = ACPI_STATE_S0;
241 return error;
242 }
243
244 return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT;
245}
246
247static int acpi_hibernation_enter(void) 292static int acpi_hibernation_enter(void)
248{ 293{
249 acpi_status status = AE_OK; 294 acpi_status status = AE_OK;
@@ -273,52 +318,55 @@ static void acpi_hibernation_leave(void)
273 acpi_leave_sleep_state_prep(ACPI_STATE_S4); 318 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
274} 319}
275 320
276static void acpi_hibernation_finish(void) 321static void acpi_pm_enable_gpes(void)
277{ 322{
278 acpi_disable_wakeup_device(ACPI_STATE_S4); 323 acpi_hw_enable_all_runtime_gpes();
279 acpi_leave_sleep_state(ACPI_STATE_S4);
280
281 /* reset firmware waking vector */
282 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
283
284 acpi_target_sleep_state = ACPI_STATE_S0;
285} 324}
286 325
287static void acpi_hibernation_end(void) 326static struct platform_hibernation_ops acpi_hibernation_ops = {
288{ 327 .begin = acpi_hibernation_begin,
289 /* 328 .end = acpi_pm_end,
290 * This is necessary in case acpi_hibernation_finish() is not called 329 .pre_snapshot = acpi_pm_prepare,
291 * during a failing transition to the sleep state. 330 .finish = acpi_pm_finish,
292 */ 331 .prepare = acpi_pm_prepare,
293 acpi_target_sleep_state = ACPI_STATE_S0; 332 .enter = acpi_hibernation_enter,
294} 333 .leave = acpi_hibernation_leave,
334 .pre_restore = acpi_pm_disable_gpes,
335 .restore_cleanup = acpi_pm_enable_gpes,
336};
295 337
296static int acpi_hibernation_pre_restore(void) 338/**
339 * acpi_hibernation_begin_old - Set the target system sleep state to
340 * ACPI_STATE_S4 and execute the _PTS control method. This
341 * function is used if the pre-ACPI 2.0 suspend ordering has been
342 * requested.
343 */
344static int acpi_hibernation_begin_old(void)
297{ 345{
298 acpi_status status; 346 int error = acpi_sleep_prepare(ACPI_STATE_S4);
299
300 status = acpi_hw_disable_all_gpes();
301
302 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
303}
304 347
305static void acpi_hibernation_restore_cleanup(void) 348 if (!error)
306{ 349 acpi_target_sleep_state = ACPI_STATE_S4;
307 acpi_hw_enable_all_runtime_gpes(); 350 return error;
308} 351}
309 352
310static struct platform_hibernation_ops acpi_hibernation_ops = { 353/*
311 .begin = acpi_hibernation_begin, 354 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
312 .end = acpi_hibernation_end, 355 * been requested.
313 .pre_snapshot = acpi_hibernation_prepare, 356 */
314 .finish = acpi_hibernation_finish, 357static struct platform_hibernation_ops acpi_hibernation_ops_old = {
315 .prepare = acpi_hibernation_prepare, 358 .begin = acpi_hibernation_begin_old,
359 .end = acpi_pm_end,
360 .pre_snapshot = acpi_pm_disable_gpes,
361 .finish = acpi_pm_finish,
362 .prepare = acpi_pm_disable_gpes,
316 .enter = acpi_hibernation_enter, 363 .enter = acpi_hibernation_enter,
317 .leave = acpi_hibernation_leave, 364 .leave = acpi_hibernation_leave,
318 .pre_restore = acpi_hibernation_pre_restore, 365 .pre_restore = acpi_pm_disable_gpes,
319 .restore_cleanup = acpi_hibernation_restore_cleanup, 366 .restore_cleanup = acpi_pm_enable_gpes,
367 .recover = acpi_pm_finish,
320}; 368};
321#endif /* CONFIG_HIBERNATION */ 369#endif /* CONFIG_HIBERNATION */
322 370
323int acpi_suspend(u32 acpi_state) 371int acpi_suspend(u32 acpi_state)
324{ 372{
@@ -419,6 +467,31 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
419 *d_min_p = d_min; 467 *d_min_p = d_min;
420 return d_max; 468 return d_max;
421} 469}
470
471/**
472 * acpi_pm_device_sleep_wake - enable or disable the system wake-up
473 * capability of given device
474 * @dev: device to handle
475 * @enable: 'true' - enable, 'false' - disable the wake-up capability
476 */
477int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
478{
479 acpi_handle handle;
480 struct acpi_device *adev;
481
482 if (!device_may_wakeup(dev))
483 return -EINVAL;
484
485 handle = DEVICE_ACPI_HANDLE(dev);
486 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
487 printk(KERN_DEBUG "ACPI handle has no context!\n");
488 return -ENODEV;
489 }
490
491 return enable ?
492 acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
493 acpi_disable_wakeup_device_power(adev);
494}
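
A hedged example of how a bus-level suspend path might use the new helper; the wrapper name is hypothetical, and acpi_pm_device_sleep_wake() itself rejects devices for which wake-up was not requested:

/* Hypothetical caller: propagate the user's wake-up preference to the
 * firmware before the device is put to sleep. */
static int example_enable_wake(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return 0;	/* nothing to arm */

	return acpi_pm_device_sleep_wake(dev, true);
}
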
422#endif 495#endif
423 496
424static void acpi_power_off_prepare(void) 497static void acpi_power_off_prepare(void)
@@ -460,13 +533,15 @@ int __init acpi_sleep_init(void)
460 } 533 }
461 } 534 }
462 535
463 suspend_set_ops(&acpi_suspend_ops); 536 suspend_set_ops(old_suspend_ordering ?
537 &acpi_suspend_ops_old : &acpi_suspend_ops);
464#endif 538#endif
465 539
466#ifdef CONFIG_HIBERNATION 540#ifdef CONFIG_HIBERNATION
467 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); 541 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
468 if (ACPI_SUCCESS(status)) { 542 if (ACPI_SUCCESS(status)) {
469 hibernation_set_ops(&acpi_hibernation_ops); 543 hibernation_set_ops(old_suspend_ordering ?
544 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
470 sleep_states[ACPI_STATE_S4] = 1; 545 sleep_states[ACPI_STATE_S4] = 1;
471 printk(" S4"); 546 printk(" S4");
472 } 547 }
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
index ed8e41becf0c..38655eb132dc 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/sleep/wakeup.c
@@ -42,7 +42,7 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
42 continue; 42 continue;
43 43
44 spin_unlock(&acpi_device_lock); 44 spin_unlock(&acpi_device_lock);
45 acpi_enable_wakeup_device_power(dev); 45 acpi_enable_wakeup_device_power(dev, sleep_state);
46 spin_lock(&acpi_device_lock); 46 spin_lock(&acpi_device_lock);
47 } 47 }
48 spin_unlock(&acpi_device_lock); 48 spin_unlock(&acpi_device_lock);
@@ -66,13 +66,15 @@ void acpi_enable_wakeup_device(u8 sleep_state)
66 list_for_each_safe(node, next, &acpi_wakeup_device_list) { 66 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
67 struct acpi_device *dev = 67 struct acpi_device *dev =
68 container_of(node, struct acpi_device, wakeup_list); 68 container_of(node, struct acpi_device, wakeup_list);
69
69 if (!dev->wakeup.flags.valid) 70 if (!dev->wakeup.flags.valid)
70 continue; 71 continue;
72
71 /* If users want to disable run-wake GPE, 73 /* If users want to disable run-wake GPE,
72 * we only disable it for wake and leave it for runtime 74 * we only disable it for wake and leave it for runtime
73 */ 75 */
74 if (!dev->wakeup.state.enabled || 76 if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
75 sleep_state > (u32) dev->wakeup.sleep_state) { 77 || sleep_state > (u32) dev->wakeup.sleep_state) {
76 if (dev->wakeup.flags.run_wake) { 78 if (dev->wakeup.flags.run_wake) {
77 spin_unlock(&acpi_device_lock); 79 spin_unlock(&acpi_device_lock);
78 /* set_gpe_type will disable GPE, leave it like that */ 80 /* set_gpe_type will disable GPE, leave it like that */
@@ -110,8 +112,9 @@ void acpi_disable_wakeup_device(u8 sleep_state)
110 112
111 if (!dev->wakeup.flags.valid) 113 if (!dev->wakeup.flags.valid)
112 continue; 114 continue;
113 if (!dev->wakeup.state.enabled || 115
114 sleep_state > (u32) dev->wakeup.sleep_state) { 116 if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
117 || sleep_state > (u32) dev->wakeup.sleep_state) {
115 if (dev->wakeup.flags.run_wake) { 118 if (dev->wakeup.flags.run_wake) {
116 spin_unlock(&acpi_device_lock); 119 spin_unlock(&acpi_device_lock);
117 acpi_set_gpe_type(dev->wakeup.gpe_device, 120 acpi_set_gpe_type(dev->wakeup.gpe_device,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 911ec600fe71..3f940393d6c7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -453,6 +453,8 @@ int platform_driver_register(struct platform_driver *drv)
453 drv->driver.suspend = platform_drv_suspend; 453 drv->driver.suspend = platform_drv_suspend;
454 if (drv->resume) 454 if (drv->resume)
455 drv->driver.resume = platform_drv_resume; 455 drv->driver.resume = platform_drv_resume;
456 if (drv->pm)
457 drv->driver.pm = &drv->pm->base;
456 return driver_register(&drv->driver); 458 return driver_register(&drv->driver);
457} 459}
458EXPORT_SYMBOL_GPL(platform_driver_register); 460EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -560,7 +562,9 @@ static int platform_match(struct device *dev, struct device_driver *drv)
560 return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); 562 return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
561} 563}
562 564
563static int platform_suspend(struct device *dev, pm_message_t mesg) 565#ifdef CONFIG_PM_SLEEP
566
567static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
564{ 568{
565 int ret = 0; 569 int ret = 0;
566 570
@@ -570,7 +574,7 @@ static int platform_suspend(struct device *dev, pm_message_t mesg)
570 return ret; 574 return ret;
571} 575}
572 576
573static int platform_suspend_late(struct device *dev, pm_message_t mesg) 577static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
574{ 578{
575 struct platform_driver *drv = to_platform_driver(dev->driver); 579 struct platform_driver *drv = to_platform_driver(dev->driver);
576 struct platform_device *pdev; 580 struct platform_device *pdev;
@@ -583,7 +587,7 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
583 return ret; 587 return ret;
584} 588}
585 589
586static int platform_resume_early(struct device *dev) 590static int platform_legacy_resume_early(struct device *dev)
587{ 591{
588 struct platform_driver *drv = to_platform_driver(dev->driver); 592 struct platform_driver *drv = to_platform_driver(dev->driver);
589 struct platform_device *pdev; 593 struct platform_device *pdev;
@@ -596,7 +600,7 @@ static int platform_resume_early(struct device *dev)
596 return ret; 600 return ret;
597} 601}
598 602
599static int platform_resume(struct device *dev) 603static int platform_legacy_resume(struct device *dev)
600{ 604{
601 int ret = 0; 605 int ret = 0;
602 606
@@ -606,15 +610,291 @@ static int platform_resume(struct device *dev)
606 return ret; 610 return ret;
607} 611}
608 612
613static int platform_pm_prepare(struct device *dev)
614{
615 struct device_driver *drv = dev->driver;
616 int ret = 0;
617
618 if (drv && drv->pm && drv->pm->prepare)
619 ret = drv->pm->prepare(dev);
620
621 return ret;
622}
623
624static void platform_pm_complete(struct device *dev)
625{
626 struct device_driver *drv = dev->driver;
627
628 if (drv && drv->pm && drv->pm->complete)
629 drv->pm->complete(dev);
630}
631
632#ifdef CONFIG_SUSPEND
633
634static int platform_pm_suspend(struct device *dev)
635{
636 struct device_driver *drv = dev->driver;
637 int ret = 0;
638
639 if (drv && drv->pm) {
640 if (drv->pm->suspend)
641 ret = drv->pm->suspend(dev);
642 } else {
643 ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
644 }
645
646 return ret;
647}
648
649static int platform_pm_suspend_noirq(struct device *dev)
650{
651 struct platform_driver *pdrv;
652 int ret = 0;
653
654 if (!dev->driver)
655 return 0;
656
657 pdrv = to_platform_driver(dev->driver);
658 if (pdrv->pm) {
659 if (pdrv->pm->suspend_noirq)
660 ret = pdrv->pm->suspend_noirq(dev);
661 } else {
662 ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
663 }
664
665 return ret;
666}
667
668static int platform_pm_resume(struct device *dev)
669{
670 struct device_driver *drv = dev->driver;
671 int ret = 0;
672
673 if (drv && drv->pm) {
674 if (drv->pm->resume)
675 ret = drv->pm->resume(dev);
676 } else {
677 ret = platform_legacy_resume(dev);
678 }
679
680 return ret;
681}
682
683static int platform_pm_resume_noirq(struct device *dev)
684{
685 struct platform_driver *pdrv;
686 int ret = 0;
687
688 if (!dev->driver)
689 return 0;
690
691 pdrv = to_platform_driver(dev->driver);
692 if (pdrv->pm) {
693 if (pdrv->pm->resume_noirq)
694 ret = pdrv->pm->resume_noirq(dev);
695 } else {
696 ret = platform_legacy_resume_early(dev);
697 }
698
699 return ret;
700}
701
702#else /* !CONFIG_SUSPEND */
703
704#define platform_pm_suspend NULL
705#define platform_pm_resume NULL
706#define platform_pm_suspend_noirq NULL
707#define platform_pm_resume_noirq NULL
708
709#endif /* !CONFIG_SUSPEND */
710
711#ifdef CONFIG_HIBERNATION
712
713static int platform_pm_freeze(struct device *dev)
714{
715 struct device_driver *drv = dev->driver;
716 int ret = 0;
717
718 if (!drv)
719 return 0;
720
721 if (drv->pm) {
722 if (drv->pm->freeze)
723 ret = drv->pm->freeze(dev);
724 } else {
725 ret = platform_legacy_suspend(dev, PMSG_FREEZE);
726 }
727
728 return ret;
729}
730
731static int platform_pm_freeze_noirq(struct device *dev)
732{
733 struct platform_driver *pdrv;
734 int ret = 0;
735
736 if (!dev->driver)
737 return 0;
738
739 pdrv = to_platform_driver(dev->driver);
740 if (pdrv->pm) {
741 if (pdrv->pm->freeze_noirq)
742 ret = pdrv->pm->freeze_noirq(dev);
743 } else {
744 ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
745 }
746
747 return ret;
748}
749
750static int platform_pm_thaw(struct device *dev)
751{
752 struct device_driver *drv = dev->driver;
753 int ret = 0;
754
755 if (drv && drv->pm) {
756 if (drv->pm->thaw)
757 ret = drv->pm->thaw(dev);
758 } else {
759 ret = platform_legacy_resume(dev);
760 }
761
762 return ret;
763}
764
765static int platform_pm_thaw_noirq(struct device *dev)
766{
767 struct platform_driver *pdrv;
768 int ret = 0;
769
770 if (!dev->driver)
771 return 0;
772
773 pdrv = to_platform_driver(dev->driver);
774 if (pdrv->pm) {
775 if (pdrv->pm->thaw_noirq)
776 ret = pdrv->pm->thaw_noirq(dev);
777 } else {
778 ret = platform_legacy_resume_early(dev);
779 }
780
781 return ret;
782}
783
784static int platform_pm_poweroff(struct device *dev)
785{
786 struct device_driver *drv = dev->driver;
787 int ret = 0;
788
789 if (drv && drv->pm) {
790 if (drv->pm->poweroff)
791 ret = drv->pm->poweroff(dev);
792 } else {
793 ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
794 }
795
796 return ret;
797}
798
799static int platform_pm_poweroff_noirq(struct device *dev)
800{
801 struct platform_driver *pdrv;
802 int ret = 0;
803
804 if (!dev->driver)
805 return 0;
806
807 pdrv = to_platform_driver(dev->driver);
808 if (pdrv->pm) {
809 if (pdrv->pm->poweroff_noirq)
810 ret = pdrv->pm->poweroff_noirq(dev);
811 } else {
812 ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
813 }
814
815 return ret;
816}
817
818static int platform_pm_restore(struct device *dev)
819{
820 struct device_driver *drv = dev->driver;
821 int ret = 0;
822
823 if (drv && drv->pm) {
824 if (drv->pm->restore)
825 ret = drv->pm->restore(dev);
826 } else {
827 ret = platform_legacy_resume(dev);
828 }
829
830 return ret;
831}
832
833static int platform_pm_restore_noirq(struct device *dev)
834{
835 struct platform_driver *pdrv;
836 int ret = 0;
837
838 if (!dev->driver)
839 return 0;
840
841 pdrv = to_platform_driver(dev->driver);
842 if (pdrv->pm) {
843 if (pdrv->pm->restore_noirq)
844 ret = pdrv->pm->restore_noirq(dev);
845 } else {
846 ret = platform_legacy_resume_early(dev);
847 }
848
849 return ret;
850}
851
852#else /* !CONFIG_HIBERNATION */
853
854#define platform_pm_freeze NULL
855#define platform_pm_thaw NULL
856#define platform_pm_poweroff NULL
857#define platform_pm_restore NULL
858#define platform_pm_freeze_noirq NULL
859#define platform_pm_thaw_noirq NULL
860#define platform_pm_poweroff_noirq NULL
861#define platform_pm_restore_noirq NULL
862
863#endif /* !CONFIG_HIBERNATION */
864
865struct pm_ext_ops platform_pm_ops = {
866 .base = {
867 .prepare = platform_pm_prepare,
868 .complete = platform_pm_complete,
869 .suspend = platform_pm_suspend,
870 .resume = platform_pm_resume,
871 .freeze = platform_pm_freeze,
872 .thaw = platform_pm_thaw,
873 .poweroff = platform_pm_poweroff,
874 .restore = platform_pm_restore,
875 },
876 .suspend_noirq = platform_pm_suspend_noirq,
877 .resume_noirq = platform_pm_resume_noirq,
878 .freeze_noirq = platform_pm_freeze_noirq,
879 .thaw_noirq = platform_pm_thaw_noirq,
880 .poweroff_noirq = platform_pm_poweroff_noirq,
881 .restore_noirq = platform_pm_restore_noirq,
882};
883
884#define PLATFORM_PM_OPS_PTR &platform_pm_ops
885
886#else /* !CONFIG_PM_SLEEP */
887
888#define PLATFORM_PM_OPS_PTR NULL
889
890#endif /* !CONFIG_PM_SLEEP */
891
609struct bus_type platform_bus_type = { 892struct bus_type platform_bus_type = {
610 .name = "platform", 893 .name = "platform",
611 .dev_attrs = platform_dev_attrs, 894 .dev_attrs = platform_dev_attrs,
612 .match = platform_match, 895 .match = platform_match,
613 .uevent = platform_uevent, 896 .uevent = platform_uevent,
614 .suspend = platform_suspend, 897 .pm = PLATFORM_PM_OPS_PTR,
615 .suspend_late = platform_suspend_late,
616 .resume_early = platform_resume_early,
617 .resume = platform_resume,
618}; 898};
619EXPORT_SYMBOL_GPL(platform_bus_type); 899EXPORT_SYMBOL_GPL(platform_bus_type);
620 900
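
With platform_driver_register() now propagating drv->pm into driver.pm, a platform driver can supply the new callbacks instead of the legacy suspend/resume pair. A minimal sketch, assuming the struct platform_driver 'pm' member that the hunk above dereferences; the foo_* names are placeholders:

static int foo_suspend(struct device *dev)
{
	/* save device state, gate clocks, etc. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

static struct pm_ext_ops foo_pm_ops = {
	.base = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	},
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.pm = &foo_pm_ops,	/* platform_driver_register() sets driver.pm = &pm->base */
};
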
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 45cc3d9eacb8..3250c5257b74 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -12,11 +12,9 @@
12 * and add it to the list of power-controlled devices. sysfs entries for 12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added. 13 * controlling device power management will also be added.
14 * 14 *
15 * A different set of lists than the global subsystem list are used to 15 * A separate list is used for keeping track of power info, because the power
16 * keep track of power info because we use different lists to hold 16 * domain dependencies may differ from the ancestral dependencies that the
17 * devices based on what stage of the power management process they 17 * subsystem list maintains.
18 * are in. The power domain dependencies may also differ from the
19 * ancestral dependencies that the subsystem list maintains.
20 */ 18 */
21 19
22#include <linux/device.h> 20#include <linux/device.h>
@@ -30,31 +28,40 @@
30#include "power.h" 28#include "power.h"
31 29
32/* 30/*
33 * The entries in the dpm_active list are in a depth first order, simply 31 * The entries in the dpm_list are in a depth first order, simply
34 * because children are guaranteed to be discovered after parents, and 32 * because children are guaranteed to be discovered after parents, and
35 * are inserted at the back of the list on discovery. 33 * are inserted at the back of the list on discovery.
36 * 34 *
37 * All the other lists are kept in the same order, for consistency.
38 * However the lists aren't always traversed in the same order.
39 * Semaphores must be acquired from the top (i.e., front) down
40 * and released in the opposite order. Devices must be suspended
41 * from the bottom (i.e., end) up and resumed in the opposite order.
42 * That way no parent will be suspended while it still has an active
43 * child.
44 *
45 * Since device_pm_add() may be called with a device semaphore held, 35 * Since device_pm_add() may be called with a device semaphore held,
46 * we must never try to acquire a device semaphore while holding 36 * we must never try to acquire a device semaphore while holding
47 * dpm_list_mutex. 37 * dpm_list_mutex.
48 */ 38 */
49 39
50LIST_HEAD(dpm_active); 40LIST_HEAD(dpm_list);
51static LIST_HEAD(dpm_off);
52static LIST_HEAD(dpm_off_irq);
53 41
54static DEFINE_MUTEX(dpm_list_mtx); 42static DEFINE_MUTEX(dpm_list_mtx);
55 43
56/* 'true' if all devices have been suspended, protected by dpm_list_mtx */ 44/*
57static bool all_sleeping; 45 * Set once the preparation of devices for a PM transition has started, reset
46 * before starting to resume devices. Protected by dpm_list_mtx.
47 */
48static bool transition_started;
49
50/**
51 * device_pm_lock - lock the list of active devices used by the PM core
52 */
53void device_pm_lock(void)
54{
55 mutex_lock(&dpm_list_mtx);
56}
57
58/**
59 * device_pm_unlock - unlock the list of active devices used by the PM core
60 */
61void device_pm_unlock(void)
62{
63 mutex_unlock(&dpm_list_mtx);
64}
58 65
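
device_pm_lock()/device_pm_unlock() expose dpm_list_mtx so that code outside this file can walk dpm_list safely. A hypothetical user, for illustration only:

/* Sketch: any external walker of dpm_list is expected to bracket the
 * traversal with the new helpers. */
static void example_walk_dpm_list(void)
{
	struct device *dev;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		dev_dbg(dev, "power status %d\n", dev->power.status);
	device_pm_unlock();
}
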
59/** 66/**
60 * device_pm_add - add a device to the list of active devices 67 * device_pm_add - add a device to the list of active devices
@@ -68,17 +75,25 @@ int device_pm_add(struct device *dev)
68 dev->bus ? dev->bus->name : "No Bus", 75 dev->bus ? dev->bus->name : "No Bus",
69 kobject_name(&dev->kobj)); 76 kobject_name(&dev->kobj));
70 mutex_lock(&dpm_list_mtx); 77 mutex_lock(&dpm_list_mtx);
71 if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) { 78 if (dev->parent) {
72 if (dev->parent->power.sleeping) 79 if (dev->parent->power.status >= DPM_SUSPENDING) {
73 dev_warn(dev, "parent %s is sleeping\n", 80 dev_warn(dev, "parent %s is sleeping, will not add\n",
74 dev->parent->bus_id); 81 dev->parent->bus_id);
75 else 82 WARN_ON(true);
76 dev_warn(dev, "all devices are sleeping\n"); 83 }
84 } else if (transition_started) {
85 /*
86 * We refuse to register parentless devices while a PM
87 * transition is in progress in order to avoid leaving them
88 * unhandled down the road
89 */
77 WARN_ON(true); 90 WARN_ON(true);
78 } 91 }
79 error = dpm_sysfs_add(dev); 92 error = dpm_sysfs_add(dev);
80 if (!error) 93 if (!error) {
81 list_add_tail(&dev->power.entry, &dpm_active); 94 dev->power.status = DPM_ON;
95 list_add_tail(&dev->power.entry, &dpm_list);
96 }
82 mutex_unlock(&dpm_list_mtx); 97 mutex_unlock(&dpm_list_mtx);
83 return error; 98 return error;
84} 99}
@@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev)
100 mutex_unlock(&dpm_list_mtx); 115 mutex_unlock(&dpm_list_mtx);
101} 116}
102 117
118/**
119 * pm_op - execute the PM operation appropriate for the given PM event
120 * @dev: Device.
121 * @ops: PM operations to choose from.
122 * @state: PM transition of the system being carried out.
123 */
124static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
125{
126 int error = 0;
127
128 switch (state.event) {
129#ifdef CONFIG_SUSPEND
130 case PM_EVENT_SUSPEND:
131 if (ops->suspend) {
132 error = ops->suspend(dev);
133 suspend_report_result(ops->suspend, error);
134 }
135 break;
136 case PM_EVENT_RESUME:
137 if (ops->resume) {
138 error = ops->resume(dev);
139 suspend_report_result(ops->resume, error);
140 }
141 break;
142#endif /* CONFIG_SUSPEND */
143#ifdef CONFIG_HIBERNATION
144 case PM_EVENT_FREEZE:
145 case PM_EVENT_QUIESCE:
146 if (ops->freeze) {
147 error = ops->freeze(dev);
148 suspend_report_result(ops->freeze, error);
149 }
150 break;
151 case PM_EVENT_HIBERNATE:
152 if (ops->poweroff) {
153 error = ops->poweroff(dev);
154 suspend_report_result(ops->poweroff, error);
155 }
156 break;
157 case PM_EVENT_THAW:
158 case PM_EVENT_RECOVER:
159 if (ops->thaw) {
160 error = ops->thaw(dev);
161 suspend_report_result(ops->thaw, error);
162 }
163 break;
164 case PM_EVENT_RESTORE:
165 if (ops->restore) {
166 error = ops->restore(dev);
167 suspend_report_result(ops->restore, error);
168 }
169 break;
170#endif /* CONFIG_HIBERNATION */
171 default:
172 error = -EINVAL;
173 }
174 return error;
175}
176
177/**
178 * pm_noirq_op - execute the PM operation appropriate for the given PM event
179 * @dev: Device.
180 * @ops: PM operations to choose from.
181 * @state: PM transition of the system being carried out.
182 *
183 * The operation is executed with interrupts disabled by the only remaining
184 * functional CPU in the system.
185 */
186static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
187 pm_message_t state)
188{
189 int error = 0;
190
191 switch (state.event) {
192#ifdef CONFIG_SUSPEND
193 case PM_EVENT_SUSPEND:
194 if (ops->suspend_noirq) {
195 error = ops->suspend_noirq(dev);
196 suspend_report_result(ops->suspend_noirq, error);
197 }
198 break;
199 case PM_EVENT_RESUME:
200 if (ops->resume_noirq) {
201 error = ops->resume_noirq(dev);
202 suspend_report_result(ops->resume_noirq, error);
203 }
204 break;
205#endif /* CONFIG_SUSPEND */
206#ifdef CONFIG_HIBERNATION
207 case PM_EVENT_FREEZE:
208 case PM_EVENT_QUIESCE:
209 if (ops->freeze_noirq) {
210 error = ops->freeze_noirq(dev);
211 suspend_report_result(ops->freeze_noirq, error);
212 }
213 break;
214 case PM_EVENT_HIBERNATE:
215 if (ops->poweroff_noirq) {
216 error = ops->poweroff_noirq(dev);
217 suspend_report_result(ops->poweroff_noirq, error);
218 }
219 break;
220 case PM_EVENT_THAW:
221 case PM_EVENT_RECOVER:
222 if (ops->thaw_noirq) {
223 error = ops->thaw_noirq(dev);
224 suspend_report_result(ops->thaw_noirq, error);
225 }
226 break;
227 case PM_EVENT_RESTORE:
228 if (ops->restore_noirq) {
229 error = ops->restore_noirq(dev);
230 suspend_report_result(ops->restore_noirq, error);
231 }
232 break;
233#endif /* CONFIG_HIBERNATION */
234 default:
235 error = -EINVAL;
236 }
237 return error;
238}
239
240static char *pm_verb(int event)
241{
242 switch (event) {
243 case PM_EVENT_SUSPEND:
244 return "suspend";
245 case PM_EVENT_RESUME:
246 return "resume";
247 case PM_EVENT_FREEZE:
248 return "freeze";
249 case PM_EVENT_QUIESCE:
250 return "quiesce";
251 case PM_EVENT_HIBERNATE:
252 return "hibernate";
253 case PM_EVENT_THAW:
254 return "thaw";
255 case PM_EVENT_RESTORE:
256 return "restore";
257 case PM_EVENT_RECOVER:
258 return "recover";
259 default:
260 return "(unknown PM event)";
261 }
262}
263
264static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
265{
266 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
267 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
268 ", may wakeup" : "");
269}
270
271static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
272 int error)
273{
274 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
275 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
276}
277
103/*------------------------- Resume routines -------------------------*/ 278/*------------------------- Resume routines -------------------------*/
104 279
105/** 280/**
106 * resume_device_early - Power on one device (early resume). 281 * resume_device_noirq - Power on one device (early resume).
107 * @dev: Device. 282 * @dev: Device.
283 * @state: PM transition of the system being carried out.
108 * 284 *
109 * Must be called with interrupts disabled. 285 * Must be called with interrupts disabled.
110 */ 286 */
111static int resume_device_early(struct device *dev) 287static int resume_device_noirq(struct device *dev, pm_message_t state)
112{ 288{
113 int error = 0; 289 int error = 0;
114 290
115 TRACE_DEVICE(dev); 291 TRACE_DEVICE(dev);
116 TRACE_RESUME(0); 292 TRACE_RESUME(0);
117 293
118 if (dev->bus && dev->bus->resume_early) { 294 if (!dev->bus)
119 dev_dbg(dev, "EARLY resume\n"); 295 goto End;
296
297 if (dev->bus->pm) {
298 pm_dev_dbg(dev, state, "EARLY ");
299 error = pm_noirq_op(dev, dev->bus->pm, state);
300 } else if (dev->bus->resume_early) {
301 pm_dev_dbg(dev, state, "legacy EARLY ");
120 error = dev->bus->resume_early(dev); 302 error = dev->bus->resume_early(dev);
121 } 303 }
122 304 End:
123 TRACE_RESUME(error); 305 TRACE_RESUME(error);
124 return error; 306 return error;
125} 307}
126 308
127/** 309/**
128 * dpm_power_up - Power on all regular (non-sysdev) devices. 310 * dpm_power_up - Power on all regular (non-sysdev) devices.
311 * @state: PM transition of the system being carried out.
129 * 312 *
130 * Walk the dpm_off_irq list and power each device up. This 313 * Execute the appropriate "noirq resume" callback for all devices marked
131 * is used for devices that required they be powered down with 314 * as DPM_OFF_IRQ.
132 * interrupts disabled. As devices are powered on, they are moved
133 * to the dpm_off list.
134 * 315 *
135 * Must be called with interrupts disabled and only one CPU running. 316 * Must be called with interrupts disabled and only one CPU running.
136 */ 317 */
137static void dpm_power_up(void) 318static void dpm_power_up(pm_message_t state)
138{ 319{
320 struct device *dev;
139 321
140 while (!list_empty(&dpm_off_irq)) { 322 list_for_each_entry(dev, &dpm_list, power.entry)
141 struct list_head *entry = dpm_off_irq.next; 323 if (dev->power.status > DPM_OFF) {
142 struct device *dev = to_device(entry); 324 int error;
143 325
144 list_move_tail(entry, &dpm_off); 326 dev->power.status = DPM_OFF;
145 resume_device_early(dev); 327 error = resume_device_noirq(dev, state);
146 } 328 if (error)
329 pm_dev_err(dev, state, " early", error);
330 }
147} 331}
148 332
149/** 333/**
150 * device_power_up - Turn on all devices that need special attention. 334 * device_power_up - Turn on all devices that need special attention.
335 * @state: PM transition of the system being carried out.
151 * 336 *
152 * Power on system devices, then devices that required we shut them down 337 * Power on system devices, then devices that required we shut them down
153 * with interrupts disabled. 338 * with interrupts disabled.
154 * 339 *
155 * Must be called with interrupts disabled. 340 * Must be called with interrupts disabled.
156 */ 341 */
157void device_power_up(void) 342void device_power_up(pm_message_t state)
158{ 343{
159 sysdev_resume(); 344 sysdev_resume();
160 dpm_power_up(); 345 dpm_power_up(state);
161} 346}
162EXPORT_SYMBOL_GPL(device_power_up); 347EXPORT_SYMBOL_GPL(device_power_up);
163 348
164/** 349/**
165 * resume_device - Restore state for one device. 350 * resume_device - Restore state for one device.
166 * @dev: Device. 351 * @dev: Device.
167 * 352 * @state: PM transition of the system being carried out.
168 */ 353 */
169static int resume_device(struct device *dev) 354static int resume_device(struct device *dev, pm_message_t state)
170{ 355{
171 int error = 0; 356 int error = 0;
172 357
@@ -175,21 +360,40 @@ static int resume_device(struct device *dev)
175 360
176 down(&dev->sem); 361 down(&dev->sem);
177 362
178 if (dev->bus && dev->bus->resume) { 363 if (dev->bus) {
179 dev_dbg(dev,"resuming\n"); 364 if (dev->bus->pm) {
180 error = dev->bus->resume(dev); 365 pm_dev_dbg(dev, state, "");
366 error = pm_op(dev, &dev->bus->pm->base, state);
367 } else if (dev->bus->resume) {
368 pm_dev_dbg(dev, state, "legacy ");
369 error = dev->bus->resume(dev);
370 }
371 if (error)
372 goto End;
181 } 373 }
182 374
183 if (!error && dev->type && dev->type->resume) { 375 if (dev->type) {
184 dev_dbg(dev,"resuming\n"); 376 if (dev->type->pm) {
185 error = dev->type->resume(dev); 377 pm_dev_dbg(dev, state, "type ");
378 error = pm_op(dev, dev->type->pm, state);
379 } else if (dev->type->resume) {
380 pm_dev_dbg(dev, state, "legacy type ");
381 error = dev->type->resume(dev);
382 }
383 if (error)
384 goto End;
186 } 385 }
187 386
188 if (!error && dev->class && dev->class->resume) { 387 if (dev->class) {
189 dev_dbg(dev,"class resume\n"); 388 if (dev->class->pm) {
190 error = dev->class->resume(dev); 389 pm_dev_dbg(dev, state, "class ");
390 error = pm_op(dev, dev->class->pm, state);
391 } else if (dev->class->resume) {
392 pm_dev_dbg(dev, state, "legacy class ");
393 error = dev->class->resume(dev);
394 }
191 } 395 }
192 396 End:
193 up(&dev->sem); 397 up(&dev->sem);
194 398
195 TRACE_RESUME(error); 399 TRACE_RESUME(error);
@@ -198,78 +402,161 @@ static int resume_device(struct device *dev)
198 402
199/** 403/**
200 * dpm_resume - Resume every device. 404 * dpm_resume - Resume every device.
405 * @state: PM transition of the system being carried out.
201 * 406 *
202 * Resume the devices that have either not gone through 407 * Execute the appropriate "resume" callback for all devices the status of
203 * the late suspend, or that did go through it but also 408 * which indicates that they are inactive.
204 * went through the early resume. 409 */
410static void dpm_resume(pm_message_t state)
411{
412 struct list_head list;
413
414 INIT_LIST_HEAD(&list);
415 mutex_lock(&dpm_list_mtx);
416 transition_started = false;
417 while (!list_empty(&dpm_list)) {
418 struct device *dev = to_device(dpm_list.next);
419
420 get_device(dev);
421 if (dev->power.status >= DPM_OFF) {
422 int error;
423
424 dev->power.status = DPM_RESUMING;
425 mutex_unlock(&dpm_list_mtx);
426
427 error = resume_device(dev, state);
428
429 mutex_lock(&dpm_list_mtx);
430 if (error)
431 pm_dev_err(dev, state, "", error);
432 } else if (dev->power.status == DPM_SUSPENDING) {
433 /* Allow new children of the device to be registered */
434 dev->power.status = DPM_RESUMING;
435 }
436 if (!list_empty(&dev->power.entry))
437 list_move_tail(&dev->power.entry, &list);
438 put_device(dev);
439 }
440 list_splice(&list, &dpm_list);
441 mutex_unlock(&dpm_list_mtx);
442}
443
444/**
445 * complete_device - Complete a PM transition for given device
446 * @dev: Device.
447 * @state: PM transition of the system being carried out.
448 */
449static void complete_device(struct device *dev, pm_message_t state)
450{
451 down(&dev->sem);
452
453 if (dev->class && dev->class->pm && dev->class->pm->complete) {
454 pm_dev_dbg(dev, state, "completing class ");
455 dev->class->pm->complete(dev);
456 }
457
458 if (dev->type && dev->type->pm && dev->type->pm->complete) {
459 pm_dev_dbg(dev, state, "completing type ");
460 dev->type->pm->complete(dev);
461 }
462
463 if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
464 pm_dev_dbg(dev, state, "completing ");
465 dev->bus->pm->base.complete(dev);
466 }
467
468 up(&dev->sem);
469}
470
471/**
472 * dpm_complete - Complete a PM transition for all devices.
473 * @state: PM transition of the system being carried out.
205 * 474 *
206 * Take devices from the dpm_off_list, resume them, 475 * Execute the ->complete() callbacks for all devices that are not marked
207 * and put them on the dpm_locked list. 476 * as DPM_ON.
208 */ 477 */
209static void dpm_resume(void) 478static void dpm_complete(pm_message_t state)
210{ 479{
480 struct list_head list;
481
482 INIT_LIST_HEAD(&list);
211 mutex_lock(&dpm_list_mtx); 483 mutex_lock(&dpm_list_mtx);
212 all_sleeping = false; 484 while (!list_empty(&dpm_list)) {
213 while(!list_empty(&dpm_off)) { 485 struct device *dev = to_device(dpm_list.prev);
214 struct list_head *entry = dpm_off.next;
215 struct device *dev = to_device(entry);
216 486
217 list_move_tail(entry, &dpm_active); 487 get_device(dev);
218 dev->power.sleeping = false; 488 if (dev->power.status > DPM_ON) {
219 mutex_unlock(&dpm_list_mtx); 489 dev->power.status = DPM_ON;
220 resume_device(dev); 490 mutex_unlock(&dpm_list_mtx);
221 mutex_lock(&dpm_list_mtx); 491
492 complete_device(dev, state);
493
494 mutex_lock(&dpm_list_mtx);
495 }
496 if (!list_empty(&dev->power.entry))
497 list_move(&dev->power.entry, &list);
498 put_device(dev);
222 } 499 }
500 list_splice(&list, &dpm_list);
223 mutex_unlock(&dpm_list_mtx); 501 mutex_unlock(&dpm_list_mtx);
224} 502}
225 503
226/** 504/**
227 * device_resume - Restore state of each device in system. 505 * device_resume - Restore state of each device in system.
506 * @state: PM transition of the system being carried out.
228 * 507 *
229 * Resume all the devices, unlock them all, and allow new 508 * Resume all the devices, unlock them all, and allow new
230 * devices to be registered once again. 509 * devices to be registered once again.
231 */ 510 */
232void device_resume(void) 511void device_resume(pm_message_t state)
233{ 512{
234 might_sleep(); 513 might_sleep();
235 dpm_resume(); 514 dpm_resume(state);
515 dpm_complete(state);
236} 516}
237EXPORT_SYMBOL_GPL(device_resume); 517EXPORT_SYMBOL_GPL(device_resume);
238 518
239 519
240/*------------------------- Suspend routines -------------------------*/ 520/*------------------------- Suspend routines -------------------------*/
241 521
242static inline char *suspend_verb(u32 event) 522/**
523 * resume_event - return a PM message representing the resume event
524 * corresponding to given sleep state.
525 * @sleep_state: PM message representing a sleep state.
526 */
527static pm_message_t resume_event(pm_message_t sleep_state)
243{ 528{
244 switch (event) { 529 switch (sleep_state.event) {
245 case PM_EVENT_SUSPEND: return "suspend"; 530 case PM_EVENT_SUSPEND:
246 case PM_EVENT_FREEZE: return "freeze"; 531 return PMSG_RESUME;
247 case PM_EVENT_PRETHAW: return "prethaw"; 532 case PM_EVENT_FREEZE:
248 default: return "(unknown suspend event)"; 533 case PM_EVENT_QUIESCE:
534 return PMSG_RECOVER;
535 case PM_EVENT_HIBERNATE:
536 return PMSG_RESTORE;
249 } 537 }
250} 538 return PMSG_ON;
251
252static void
253suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
254{
255 dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
256 ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
257 ", may wakeup" : "");
258} 539}
259 540
260/** 541/**
261 * suspend_device_late - Shut down one device (late suspend). 542 * suspend_device_noirq - Shut down one device (late suspend).
262 * @dev: Device. 543 * @dev: Device.
263 * @state: Power state device is entering. 544 * @state: PM transition of the system being carried out.
264 * 545 *
265 * This is called with interrupts off and only a single CPU running. 546 * This is called with interrupts off and only a single CPU running.
266 */ 547 */
267static int suspend_device_late(struct device *dev, pm_message_t state) 548static int suspend_device_noirq(struct device *dev, pm_message_t state)
268{ 549{
269 int error = 0; 550 int error = 0;
270 551
271 if (dev->bus && dev->bus->suspend_late) { 552 if (!dev->bus)
272 suspend_device_dbg(dev, state, "LATE "); 553 return 0;
554
555 if (dev->bus->pm) {
556 pm_dev_dbg(dev, state, "LATE ");
557 error = pm_noirq_op(dev, dev->bus->pm, state);
558 } else if (dev->bus->suspend_late) {
559 pm_dev_dbg(dev, state, "legacy LATE ");
273 error = dev->bus->suspend_late(dev, state); 560 error = dev->bus->suspend_late(dev, state);
274 suspend_report_result(dev->bus->suspend_late, error); 561 suspend_report_result(dev->bus->suspend_late, error);
275 } 562 }
@@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state)
278 565
279/** 566/**
280 * device_power_down - Shut down special devices. 567 * device_power_down - Shut down special devices.
281 * @state: Power state to enter. 568 * @state: PM transition of the system being carried out.
282 * 569 *
283 * Power down devices that require interrupts to be disabled 570 * Power down devices that require interrupts to be disabled.
284 * and move them from the dpm_off list to the dpm_off_irq list.
285 * Then power down system devices. 571 * Then power down system devices.
286 * 572 *
287 * Must be called with interrupts disabled and only one CPU running. 573 * Must be called with interrupts disabled and only one CPU running.
288 */ 574 */
289int device_power_down(pm_message_t state) 575int device_power_down(pm_message_t state)
290{ 576{
577 struct device *dev;
291 int error = 0; 578 int error = 0;
292 579
293 while (!list_empty(&dpm_off)) { 580 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
294 struct list_head *entry = dpm_off.prev; 581 error = suspend_device_noirq(dev, state);
295 struct device *dev = to_device(entry);
296
297 error = suspend_device_late(dev, state);
298 if (error) { 582 if (error) {
299 printk(KERN_ERR "Could not power down device %s: " 583 pm_dev_err(dev, state, " late", error);
300 "error %d\n",
301 kobject_name(&dev->kobj), error);
302 break; 584 break;
303 } 585 }
304 if (!list_empty(&dev->power.entry)) 586 dev->power.status = DPM_OFF_IRQ;
305 list_move(&dev->power.entry, &dpm_off_irq);
306 } 587 }
307
308 if (!error) 588 if (!error)
309 error = sysdev_suspend(state); 589 error = sysdev_suspend(state);
310 if (error) 590 if (error)
311 dpm_power_up(); 591 dpm_power_up(resume_event(state));
312 return error; 592 return error;
313} 593}
314EXPORT_SYMBOL_GPL(device_power_down); 594EXPORT_SYMBOL_GPL(device_power_down);
@@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
316/** 596/**
317 * suspend_device - Save state of one device. 597 * suspend_device - Save state of one device.
318 * @dev: Device. 598 * @dev: Device.
319 * @state: Power state device is entering. 599 * @state: PM transition of the system being carried out.
320 */ 600 */
321static int suspend_device(struct device *dev, pm_message_t state) 601static int suspend_device(struct device *dev, pm_message_t state)
322{ 602{
@@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state)
324 604
325 down(&dev->sem); 605 down(&dev->sem);
326 606
327 if (dev->class && dev->class->suspend) { 607 if (dev->class) {
328 suspend_device_dbg(dev, state, "class "); 608 if (dev->class->pm) {
329 error = dev->class->suspend(dev, state); 609 pm_dev_dbg(dev, state, "class ");
330 suspend_report_result(dev->class->suspend, error); 610 error = pm_op(dev, dev->class->pm, state);
611 } else if (dev->class->suspend) {
612 pm_dev_dbg(dev, state, "legacy class ");
613 error = dev->class->suspend(dev, state);
614 suspend_report_result(dev->class->suspend, error);
615 }
616 if (error)
617 goto End;
331 } 618 }
332 619
333 if (!error && dev->type && dev->type->suspend) { 620 if (dev->type) {
334 suspend_device_dbg(dev, state, "type "); 621 if (dev->type->pm) {
335 error = dev->type->suspend(dev, state); 622 pm_dev_dbg(dev, state, "type ");
336 suspend_report_result(dev->type->suspend, error); 623 error = pm_op(dev, dev->type->pm, state);
624 } else if (dev->type->suspend) {
625 pm_dev_dbg(dev, state, "legacy type ");
626 error = dev->type->suspend(dev, state);
627 suspend_report_result(dev->type->suspend, error);
628 }
629 if (error)
630 goto End;
337 } 631 }
338 632
339 if (!error && dev->bus && dev->bus->suspend) { 633 if (dev->bus) {
340 suspend_device_dbg(dev, state, ""); 634 if (dev->bus->pm) {
341 error = dev->bus->suspend(dev, state); 635 pm_dev_dbg(dev, state, "");
342 suspend_report_result(dev->bus->suspend, error); 636 error = pm_op(dev, &dev->bus->pm->base, state);
637 } else if (dev->bus->suspend) {
638 pm_dev_dbg(dev, state, "legacy ");
639 error = dev->bus->suspend(dev, state);
640 suspend_report_result(dev->bus->suspend, error);
641 }
343 } 642 }
344 643 End:
345 up(&dev->sem); 644 up(&dev->sem);
346 645
347 return error; 646 return error;
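
To illustrate the dispatch order that suspend_device() and suspend_device_noirq() now follow (the new ->pm operations are tried first, the legacy ->suspend/->suspend_late methods only when ->pm is absent), here is a minimal sketch of a bus type converted to the new callbacks. It assumes the struct pm_ext_ops layout this series dispatches through (an embedded set of ordinary operations in .base plus *_noirq variants); the foo_* names are invented for illustration and are not part of this patch.

/* Sketch only, not code from this patch. */
static int foo_bus_prepare(struct device *dev)
{
	/* run from dpm_prepare(): may sleep, no new children after this */
	return 0;
}

static int foo_bus_suspend(struct device *dev)
{
	/* run by suspend_device() via pm_op(dev, &bus->pm->base, state) */
	return 0;
}

static int foo_bus_suspend_noirq(struct device *dev)
{
	/* run by suspend_device_noirq() with interrupts disabled */
	return 0;
}

static struct pm_ext_ops foo_bus_pm_ops = {
	.base = {
		.prepare = foo_bus_prepare,
		.suspend = foo_bus_suspend,
	},
	.suspend_noirq = foo_bus_suspend_noirq,
};

static struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};
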
@@ -349,67 +648,139 @@ static int suspend_device(struct device *dev, pm_message_t state)
349 648
350/** 649/**
351 * dpm_suspend - Suspend every device. 650 * dpm_suspend - Suspend every device.
352 * @state: Power state to put each device in. 651 * @state: PM transition of the system being carried out.
353 *
354 * Walk the dpm_locked list. Suspend each device and move it
355 * to the dpm_off list.
356 * 652 *
357 * (For historical reasons, if it returns -EAGAIN, that used to mean 653 * Execute the appropriate "suspend" callbacks for all devices.
358 * that the device would be called again with interrupts disabled.
359 * These days, we use the "suspend_late()" callback for that, so we
360 * print a warning and consider it an error).
361 */ 654 */
362static int dpm_suspend(pm_message_t state) 655static int dpm_suspend(pm_message_t state)
363{ 656{
657 struct list_head list;
364 int error = 0; 658 int error = 0;
365 659
660 INIT_LIST_HEAD(&list);
366 mutex_lock(&dpm_list_mtx); 661 mutex_lock(&dpm_list_mtx);
367 while (!list_empty(&dpm_active)) { 662 while (!list_empty(&dpm_list)) {
368 struct list_head *entry = dpm_active.prev; 663 struct device *dev = to_device(dpm_list.prev);
369 struct device *dev = to_device(entry);
370 664
371 WARN_ON(dev->parent && dev->parent->power.sleeping); 665 get_device(dev);
372
373 dev->power.sleeping = true;
374 mutex_unlock(&dpm_list_mtx); 666 mutex_unlock(&dpm_list_mtx);
667
375 error = suspend_device(dev, state); 668 error = suspend_device(dev, state);
669
376 mutex_lock(&dpm_list_mtx); 670 mutex_lock(&dpm_list_mtx);
377 if (error) { 671 if (error) {
378 printk(KERN_ERR "Could not suspend device %s: " 672 pm_dev_err(dev, state, "", error);
379 "error %d%s\n", 673 put_device(dev);
380 kobject_name(&dev->kobj),
381 error,
382 (error == -EAGAIN ?
383 " (please convert to suspend_late)" :
384 ""));
385 dev->power.sleeping = false;
386 break; 674 break;
387 } 675 }
676 dev->power.status = DPM_OFF;
388 if (!list_empty(&dev->power.entry)) 677 if (!list_empty(&dev->power.entry))
389 list_move(&dev->power.entry, &dpm_off); 678 list_move(&dev->power.entry, &list);
679 put_device(dev);
390 } 680 }
391 if (!error) 681 list_splice(&list, dpm_list.prev);
392 all_sleeping = true;
393 mutex_unlock(&dpm_list_mtx); 682 mutex_unlock(&dpm_list_mtx);
683 return error;
684}
685
686/**
687 * prepare_device - Execute the ->prepare() callback(s) for given device.
688 * @dev: Device.
689 * @state: PM transition of the system being carried out.
690 */
691static int prepare_device(struct device *dev, pm_message_t state)
692{
693 int error = 0;
694
695 down(&dev->sem);
696
697 if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
698 pm_dev_dbg(dev, state, "preparing ");
699 error = dev->bus->pm->base.prepare(dev);
700 suspend_report_result(dev->bus->pm->base.prepare, error);
701 if (error)
702 goto End;
703 }
704
705 if (dev->type && dev->type->pm && dev->type->pm->prepare) {
706 pm_dev_dbg(dev, state, "preparing type ");
707 error = dev->type->pm->prepare(dev);
708 suspend_report_result(dev->type->pm->prepare, error);
709 if (error)
710 goto End;
711 }
712
713 if (dev->class && dev->class->pm && dev->class->pm->prepare) {
714 pm_dev_dbg(dev, state, "preparing class ");
715 error = dev->class->pm->prepare(dev);
716 suspend_report_result(dev->class->pm->prepare, error);
717 }
718 End:
719 up(&dev->sem);
720
721 return error;
722}
723
724/**
725 * dpm_prepare - Prepare all devices for a PM transition.
726 * @state: PM transition of the system being carried out.
727 *
728 * Execute the ->prepare() callback for all devices.
729 */
730static int dpm_prepare(pm_message_t state)
731{
732 struct list_head list;
733 int error = 0;
734
735 INIT_LIST_HEAD(&list);
736 mutex_lock(&dpm_list_mtx);
737 transition_started = true;
738 while (!list_empty(&dpm_list)) {
739 struct device *dev = to_device(dpm_list.next);
740
741 get_device(dev);
742 dev->power.status = DPM_PREPARING;
743 mutex_unlock(&dpm_list_mtx);
394 744
745 error = prepare_device(dev, state);
746
747 mutex_lock(&dpm_list_mtx);
748 if (error) {
749 dev->power.status = DPM_ON;
750 if (error == -EAGAIN) {
751 put_device(dev);
752 continue;
753 }
754 printk(KERN_ERR "PM: Failed to prepare device %s "
755 "for power transition: error %d\n",
756 kobject_name(&dev->kobj), error);
757 put_device(dev);
758 break;
759 }
760 dev->power.status = DPM_SUSPENDING;
761 if (!list_empty(&dev->power.entry))
762 list_move_tail(&dev->power.entry, &list);
763 put_device(dev);
764 }
765 list_splice(&list, &dpm_list);
766 mutex_unlock(&dpm_list_mtx);
395 return error; 767 return error;
396} 768}
397 769
398/** 770/**
399 * device_suspend - Save state and stop all devices in system. 771 * device_suspend - Save state and stop all devices in system.
400 * @state: new power management state 772 * @state: PM transition of the system being carried out.
401 * 773 *
402 * Prevent new devices from being registered, then lock all devices 774 * Prepare and suspend all devices.
403 * and suspend them.
404 */ 775 */
405int device_suspend(pm_message_t state) 776int device_suspend(pm_message_t state)
406{ 777{
407 int error; 778 int error;
408 779
409 might_sleep(); 780 might_sleep();
410 error = dpm_suspend(state); 781 error = dpm_prepare(state);
411 if (error) 782 if (!error)
412 device_resume(); 783 error = dpm_suspend(state);
413 return error; 784 return error;
414} 785}
415EXPORT_SYMBOL_GPL(device_suspend); 786EXPORT_SYMBOL_GPL(device_suspend);
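
Taken together, the reworked main.c expects its callers to drive a sleep transition roughly as sketched below. This is a simplified illustration of a caller such as the suspend core, not code from this patch; in particular, device_power_up() taking a pm_message_t is an assumption made to mirror device_power_down() above.

/* Sketch of a caller; locking, consoles and platform hooks omitted. */
static int example_enter_sleep_state(void)
{
	int error;

	error = device_suspend(PMSG_SUSPEND);	/* dpm_prepare() + dpm_suspend() */
	if (!error) {
		local_irq_disable();
		error = device_power_down(PMSG_SUSPEND); /* noirq phase + sysdev_suspend() */
		if (!error) {
			/* ... enter the platform sleep state here ... */
			device_power_up(PMSG_RESUME);	/* assumed counterpart */
		}
		local_irq_enable();
	}
	/*
	 * device_suspend() no longer resumes devices itself on failure,
	 * so the caller always unwinds with device_resume(), which now
	 * runs dpm_resume() followed by dpm_complete().
	 */
	device_resume(PMSG_RESUME);
	return error;
}
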
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a6894f2a4b99..a3252c0e2887 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@
4 * main.c 4 * main.c
5 */ 5 */
6 6
7extern struct list_head dpm_active; /* The active device list */ 7extern struct list_head dpm_list; /* The active device list */
8 8
9static inline struct device *to_device(struct list_head *entry) 9static inline struct device *to_device(struct list_head *entry)
10{ 10{
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d11f74b038db..596aeecfdffe 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,9 +6,6 @@
6#include <linux/string.h> 6#include <linux/string.h>
7#include "power.h" 7#include "power.h"
8 8
9int (*platform_enable_wakeup)(struct device *dev, int is_on);
10
11
12/* 9/*
13 * wakeup - Report/change current wakeup option for device 10 * wakeup - Report/change current wakeup option for device
14 * 11 *
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 87a7f1d02578..9b1b20b59e0a 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value)
188static int show_dev_hash(unsigned int value) 188static int show_dev_hash(unsigned int value)
189{ 189{
190 int match = 0; 190 int match = 0;
191 struct list_head * entry = dpm_active.prev; 191 struct list_head *entry = dpm_list.prev;
192 192
193 while (entry != &dpm_active) { 193 while (entry != &dpm_list) {
194 struct device * dev = to_device(entry); 194 struct device * dev = to_device(entry);
195 unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); 195 unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
196 if (hash == value) { 196 if (hash == value) {
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4d1ce2e7361e..7d63f8ced24b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the PCI bus specific drivers. 2# Makefile for the PCI bus specific drivers.
3# 3#
4 4
5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ 5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o
7obj-$(CONFIG_PROC_FS) += proc.o 7obj-$(CONFIG_PROC_FS) += proc.o
8 8
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index f8c187a763bd..93e37f0666ab 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,6 +30,7 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/pci_hotplug.h> 32#include <linux/pci_hotplug.h>
33#include <linux/pci-acpi.h>
33#include <acpi/acpi.h> 34#include <acpi/acpi.h>
34#include <acpi/acpi_bus.h> 35#include <acpi/acpi_bus.h>
35#include <acpi/actypes.h> 36#include <acpi/actypes.h>
@@ -299,7 +300,7 @@ free_and_return:
299 * 300 *
300 * @handle - the handle of the hotplug controller. 301 * @handle - the handle of the hotplug controller.
301 */ 302 */
302acpi_status acpi_run_oshp(acpi_handle handle) 303static acpi_status acpi_run_oshp(acpi_handle handle)
303{ 304{
304 acpi_status status; 305 acpi_status status;
305 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 306 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -322,9 +323,6 @@ acpi_status acpi_run_oshp(acpi_handle handle)
322 kfree(string.pointer); 323 kfree(string.pointer);
323 return status; 324 return status;
324} 325}
325EXPORT_SYMBOL_GPL(acpi_run_oshp);
326
327
328 326
329/* acpi_get_hp_params_from_firmware 327/* acpi_get_hp_params_from_firmware
330 * 328 *
@@ -374,6 +372,85 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
374} 372}
375EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); 373EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
376 374
375/**
376 * acpi_get_hp_hw_control_from_firmware
377 * @dev: the pci_dev of the bridge that has a hotplug controller
378 * @flags: requested control bits for _OSC
379 *
380 * Attempt to take hotplug control from firmware.
381 */
382int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
383{
384 acpi_status status;
385 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
386 struct pci_dev *pdev = dev;
387 struct pci_bus *parent;
388 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
389
390 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
391 OSC_SHPC_NATIVE_HP_CONTROL |
392 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
393 if (!flags) {
394 err("Invalid flags %u specified!\n", flags);
395 return -EINVAL;
396 }
397
398 /*
399 * Per PCI firmware specification, we should run the ACPI _OSC
400 * method to get control of hotplug hardware before using it. If
401 * an _OSC is missing, we look for an OSHP to do the same thing.
402 * To handle different BIOS behavior, we look for _OSC and OSHP
403 * within the scope of the hotplug controller and its parents,
 404 * up to the host bridge under which this controller exists.
405 */
406 while (!handle) {
407 /*
408 * This hotplug controller was not listed in the ACPI name
 409 * space at all. Try to get the ACPI handle of the parent PCI bus.
410 */
411 if (!pdev || !pdev->bus->parent)
412 break;
413 parent = pdev->bus->parent;
414 dbg("Could not find %s in acpi namespace, trying parent\n",
415 pci_name(pdev));
416 if (!parent->self)
417 /* Parent must be a host bridge */
418 handle = acpi_get_pci_rootbridge_handle(
419 pci_domain_nr(parent),
420 parent->number);
421 else
422 handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
423 pdev = parent->self;
424 }
425
426 while (handle) {
427 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
428 dbg("Trying to get hotplug control for %s \n",
429 (char *)string.pointer);
430 status = pci_osc_control_set(handle, flags);
431 if (status == AE_NOT_FOUND)
432 status = acpi_run_oshp(handle);
433 if (ACPI_SUCCESS(status)) {
434 dbg("Gained control for hotplug HW for pci %s (%s)\n",
435 pci_name(dev), (char *)string.pointer);
436 kfree(string.pointer);
437 return 0;
438 }
439 if (acpi_root_bridge(handle))
440 break;
441 chandle = handle;
442 status = acpi_get_parent(chandle, &handle);
443 if (ACPI_FAILURE(status))
444 break;
445 }
446
447 dbg("Cannot get control of hotplug hardware for pci %s\n",
448 pci_name(dev));
449
450 kfree(string.pointer);
451 return -ENODEV;
452}
453EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
377 454
378/* acpi_root_bridge - check to see if this acpi object is a root bridge 455/* acpi_root_bridge - check to see if this acpi object is a root bridge
379 * 456 *
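
The new acpi_get_hp_hw_control_from_firmware() is how a hotplug driver now asks the platform for ownership of the hotplug hardware, trying _OSC first and falling back to OSHP while walking up towards the host bridge. A minimal sketch of a caller, modelled on the pciehp wrapper added further down in this series; the request_native_hp_control() name is invented for illustration:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/pci-acpi.h>

/* Sketch: request native hotplug control for the bridge 'dev'. */
static int request_native_hp_control(struct pci_dev *dev)
{
	u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
		    OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL;

	/* Returns 0 once control is granted, -ENODEV if firmware keeps it. */
	return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
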
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7a29164d4b32..eecf7cbf4139 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -215,7 +215,6 @@ extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
215extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot); 215extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
216extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot); 216extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
217extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); 217extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
218extern u32 acpiphp_get_address (struct acpiphp_slot *slot);
219 218
220/* variables */ 219/* variables */
221extern int acpiphp_debug; 220extern int acpiphp_debug;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 7af68ba27903..0e496e866a84 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -70,7 +70,6 @@ static int disable_slot (struct hotplug_slot *slot);
70static int set_attention_status (struct hotplug_slot *slot, u8 value); 70static int set_attention_status (struct hotplug_slot *slot, u8 value);
71static int get_power_status (struct hotplug_slot *slot, u8 *value); 71static int get_power_status (struct hotplug_slot *slot, u8 *value);
72static int get_attention_status (struct hotplug_slot *slot, u8 *value); 72static int get_attention_status (struct hotplug_slot *slot, u8 *value);
73static int get_address (struct hotplug_slot *slot, u32 *value);
74static int get_latch_status (struct hotplug_slot *slot, u8 *value); 73static int get_latch_status (struct hotplug_slot *slot, u8 *value);
75static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 74static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
76 75
@@ -83,7 +82,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
83 .get_attention_status = get_attention_status, 82 .get_attention_status = get_attention_status,
84 .get_latch_status = get_latch_status, 83 .get_latch_status = get_latch_status,
85 .get_adapter_status = get_adapter_status, 84 .get_adapter_status = get_adapter_status,
86 .get_address = get_address,
87}; 85};
88 86
89 87
@@ -274,23 +272,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
274 return 0; 272 return 0;
275} 273}
276 274
277
278/**
279 * get_address - get pci address of a slot
280 * @hotplug_slot: slot to get status
281 * @value: pointer to struct pci_busdev (seg, bus, dev)
282 */
283static int get_address(struct hotplug_slot *hotplug_slot, u32 *value)
284{
285 struct slot *slot = hotplug_slot->private;
286
287 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
288
289 *value = acpiphp_get_address(slot->acpi_slot);
290
291 return 0;
292}
293
294static int __init init_acpi(void) 275static int __init init_acpi(void)
295{ 276{
296 int retval; 277 int retval;
@@ -357,7 +338,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
357 acpiphp_slot->slot = slot; 338 acpiphp_slot->slot = slot;
358 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); 339 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
359 340
360 retval = pci_hp_register(slot->hotplug_slot); 341 retval = pci_hp_register(slot->hotplug_slot,
342 acpiphp_slot->bridge->pci_bus,
343 acpiphp_slot->device);
344 if (retval == -EBUSY)
345 goto error_hpslot;
361 if (retval) { 346 if (retval) {
362 err("pci_hp_register failed with error %d\n", retval); 347 err("pci_hp_register failed with error %d\n", retval);
363 goto error_hpslot; 348 goto error_hpslot;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 91156f85a926..a3e4705dd8f0 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -258,7 +258,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
258 bridge->pci_bus->number, slot->device); 258 bridge->pci_bus->number, slot->device);
259 retval = acpiphp_register_hotplug_slot(slot); 259 retval = acpiphp_register_hotplug_slot(slot);
260 if (retval) { 260 if (retval) {
261 warn("acpiphp_register_hotplug_slot failed(err code = 0x%x)\n", retval); 261 if (retval == -EBUSY)
262 warn("Slot %d already registered by another "
263 "hotplug driver\n", slot->sun);
264 else
265 warn("acpiphp_register_hotplug_slot failed "
266 "(err code = 0x%x)\n", retval);
262 goto err_exit; 267 goto err_exit;
263 } 268 }
264 } 269 }
@@ -1878,19 +1883,3 @@ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
1878 1883
1879 return (sta == 0) ? 0 : 1; 1884 return (sta == 0) ? 0 : 1;
1880} 1885}
1881
1882
1883/*
1884 * pci address (seg/bus/dev)
1885 */
1886u32 acpiphp_get_address(struct acpiphp_slot *slot)
1887{
1888 u32 address;
1889 struct pci_bus *pci_bus = slot->bridge->pci_bus;
1890
1891 address = (pci_domain_nr(pci_bus) << 16) |
1892 (pci_bus->number << 8) |
1893 slot->device;
1894
1895 return address;
1896}
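
The removed acpiphp_get_address() (and, below, the generic "address" sysfs file in pci_hotplug_core.c) provided a per-driver view of the slot location. With slots now registered through the PCI slot core added in this series (note slot.o in the Makefile above), the location comes from the (bus, device) pair passed to pci_hp_register(); for reference, the packed encoding the removed helper produced was:

	/* domain in bits 31:16, bus in bits 15:8, device in bits 7:0 */
	address = (pci_domain_nr(pci_bus) << 16) | (pci_bus->number << 8) | slot->device;
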
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index ede9051fdb5d..2b7c45e39370 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -33,8 +33,10 @@
33#include <linux/kobject.h> 33#include <linux/kobject.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/pci.h>
36 37
37#include "acpiphp.h" 38#include "acpiphp.h"
39#include "../pci.h"
38 40
39#define DRIVER_VERSION "1.0.1" 41#define DRIVER_VERSION "1.0.1"
40#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" 42#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
@@ -430,7 +432,7 @@ static int __init ibm_acpiphp_init(void)
430 int retval = 0; 432 int retval = 0;
431 acpi_status status; 433 acpi_status status;
432 struct acpi_device *device; 434 struct acpi_device *device;
433 struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; 435 struct kobject *sysdir = &pci_slots_kset->kobj;
434 436
435 dbg("%s\n", __func__); 437 dbg("%s\n", __func__);
436 438
@@ -477,7 +479,7 @@ init_return:
477static void __exit ibm_acpiphp_exit(void) 479static void __exit ibm_acpiphp_exit(void)
478{ 480{
479 acpi_status status; 481 acpi_status status;
480 struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; 482 struct kobject *sysdir = &pci_slots_kset->kobj;
481 483
482 dbg("%s\n", __func__); 484 dbg("%s\n", __func__);
483 485
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d8a6b80ab42a..935947991dc9 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -285,7 +285,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
285 info->attention_status = cpci_get_attention_status(slot); 285 info->attention_status = cpci_get_attention_status(slot);
286 286
287 dbg("registering slot %s", slot->hotplug_slot->name); 287 dbg("registering slot %s", slot->hotplug_slot->name);
288 status = pci_hp_register(slot->hotplug_slot); 288 status = pci_hp_register(slot->hotplug_slot, bus, i);
289 if (status) { 289 if (status) {
290 err("pci_hp_register failed with error %d", status); 290 err("pci_hp_register failed with error %d", status);
291 goto error_name; 291 goto error_name;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 36b115b27b0b..54defec51d08 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -434,7 +434,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
434 slot->bus, slot->device, 434 slot->bus, slot->device,
435 slot->number, ctrl->slot_device_offset, 435 slot->number, ctrl->slot_device_offset,
436 slot_number); 436 slot_number);
437 result = pci_hp_register(hotplug_slot); 437 result = pci_hp_register(hotplug_slot,
438 ctrl->pci_dev->subordinate,
439 slot->device);
438 if (result) { 440 if (result) {
439 err("pci_hp_register failed with error %d\n", result); 441 err("pci_hp_register failed with error %d\n", result);
440 goto error_name; 442 goto error_name;
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 7e9a827c2687..40337a06c18a 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,6 +66,7 @@ struct dummy_slot {
66 struct pci_dev *dev; 66 struct pci_dev *dev;
67 struct work_struct remove_work; 67 struct work_struct remove_work;
68 unsigned long removed; 68 unsigned long removed;
69 char name[8];
69}; 70};
70 71
71static int debug; 72static int debug;
@@ -100,6 +101,7 @@ static int add_slot(struct pci_dev *dev)
100 struct dummy_slot *dslot; 101 struct dummy_slot *dslot;
101 struct hotplug_slot *slot; 102 struct hotplug_slot *slot;
102 int retval = -ENOMEM; 103 int retval = -ENOMEM;
104 static int count = 1;
103 105
104 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); 106 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
105 if (!slot) 107 if (!slot)
@@ -113,18 +115,18 @@ static int add_slot(struct pci_dev *dev)
113 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; 115 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
114 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; 116 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
115 117
116 slot->name = &dev->dev.bus_id[0];
117 dbg("slot->name = %s\n", slot->name);
118
119 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL); 118 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
120 if (!dslot) 119 if (!dslot)
121 goto error_info; 120 goto error_info;
122 121
122 slot->name = dslot->name;
123 snprintf(slot->name, sizeof(dslot->name), "fake%d", count++);
124 dbg("slot->name = %s\n", slot->name);
123 slot->ops = &dummy_hotplug_slot_ops; 125 slot->ops = &dummy_hotplug_slot_ops;
124 slot->release = &dummy_release; 126 slot->release = &dummy_release;
125 slot->private = dslot; 127 slot->private = dslot;
126 128
127 retval = pci_hp_register(slot); 129 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn));
128 if (retval) { 130 if (retval) {
129 err("pci_hp_register failed with error %d\n", retval); 131 err("pci_hp_register failed with error %d\n", retval);
130 goto error_dslot; 132 goto error_dslot;
@@ -148,17 +150,17 @@ error:
148static int __init pci_scan_buses(void) 150static int __init pci_scan_buses(void)
149{ 151{
150 struct pci_dev *dev = NULL; 152 struct pci_dev *dev = NULL;
151 int retval = 0; 153 int lastslot = 0;
152 154
153 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 155 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
154 retval = add_slot(dev); 156 if (PCI_FUNC(dev->devfn) > 0 &&
155 if (retval) { 157 lastslot == PCI_SLOT(dev->devfn))
156 pci_dev_put(dev); 158 continue;
157 break; 159 lastslot = PCI_SLOT(dev->devfn);
158 } 160 add_slot(dev);
159 } 161 }
160 162
161 return retval; 163 return 0;
162} 164}
163 165
164static void remove_slot(struct dummy_slot *dslot) 166static void remove_slot(struct dummy_slot *dslot)
@@ -296,23 +298,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
296 return 0; 298 return 0;
297} 299}
298 300
299/* find the hotplug_slot for the pci_dev */
300static struct hotplug_slot *get_slot_from_dev(struct pci_dev *dev)
301{
302 struct dummy_slot *dslot;
303
304 list_for_each_entry(dslot, &slot_list, node) {
305 if (dslot->dev == dev)
306 return dslot->slot;
307 }
308 return NULL;
309}
310
311
312static int disable_slot(struct hotplug_slot *slot) 301static int disable_slot(struct hotplug_slot *slot)
313{ 302{
314 struct dummy_slot *dslot; 303 struct dummy_slot *dslot;
315 struct hotplug_slot *hslot;
316 struct pci_dev *dev; 304 struct pci_dev *dev;
317 int func; 305 int func;
318 306
@@ -322,41 +310,27 @@ static int disable_slot(struct hotplug_slot *slot)
322 310
323 dbg("%s - physical_slot = %s\n", __func__, slot->name); 311 dbg("%s - physical_slot = %s\n", __func__, slot->name);
324 312
325 /* don't disable bridged devices just yet, we can't handle them easily... */ 313 for (func = 7; func >= 0; func--) {
326 if (dslot->dev->subordinate) { 314 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
327 err("Can't remove PCI devices with other PCI devices behind it yet.\n"); 315 if (!dev)
328 return -ENODEV; 316 continue;
329 } 317
330 if (test_and_set_bit(0, &dslot->removed)) { 318 if (test_and_set_bit(0, &dslot->removed)) {
331 dbg("Slot already scheduled for removal\n"); 319 dbg("Slot already scheduled for removal\n");
332 return -ENODEV; 320 return -ENODEV;
333 }
334 /* search for subfunctions and disable them first */
335 if (!(dslot->dev->devfn & 7)) {
336 for (func = 1; func < 8; func++) {
337 dev = pci_get_slot(dslot->dev->bus,
338 dslot->dev->devfn + func);
339 if (dev) {
340 hslot = get_slot_from_dev(dev);
341 if (hslot)
342 disable_slot(hslot);
343 else {
344 err("Hotplug slot not found for subfunction of PCI device\n");
345 return -ENODEV;
346 }
347 pci_dev_put(dev);
348 } else
349 dbg("No device in slot found\n");
350 } 321 }
351 }
352 322
353 /* remove the device from the pci core */ 323 /* queue work item to blow away this sysfs entry and other
354 pci_remove_bus_device(dslot->dev); 324 * parts.
325 */
326 INIT_WORK(&dslot->remove_work, remove_slot_worker);
327 queue_work(dummyphp_wq, &dslot->remove_work);
355 328
356 /* queue work item to blow away this sysfs entry and other parts. */ 329 /* blow away this sysfs entry and other parts. */
357 INIT_WORK(&dslot->remove_work, remove_slot_worker); 330 remove_slot(dslot);
358 queue_work(dummyphp_wq, &dslot->remove_work);
359 331
332 pci_dev_put(dev);
333 }
360 return 0; 334 return 0;
361} 335}
362 336
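
The reworked fakephp scan above registers one fake slot per physical slot rather than one per PCI function, and disable_slot() now walks functions 7..0 of that slot. For reference, the standard devfn helpers it relies on (defined in <linux/pci.h>, not added by this patch) are:

	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)	/* device number, 0-31 */
	#define PCI_FUNC(devfn)		((devfn) & 0x07)	/* function number, 0-7 */

So pci_scan_buses() skips any non-zero function whose PCI_SLOT() matches the slot just registered, and pci_get_slot(bus, devfn + func) in disable_slot() addresses each function of the same device in turn.
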
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index dca7efc14be2..8467d0287325 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -1001,7 +1001,8 @@ static int __init ebda_rsrc_controller (void)
1001 tmp_slot = list_entry (list, struct slot, ibm_slot_list); 1001 tmp_slot = list_entry (list, struct slot, ibm_slot_list);
1002 1002
1003 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); 1003 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
1004 pci_hp_register (tmp_slot->hotplug_slot); 1004 pci_hp_register(tmp_slot->hotplug_slot,
1005 pci_find_bus(0, tmp_slot->bus), tmp_slot->device);
1005 } 1006 }
1006 1007
1007 print_ebda_hpc (); 1008 print_ebda_hpc ();
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index a11021e8ce37..5f85b1b120e3 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -40,6 +40,7 @@
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci_hotplug.h> 41#include <linux/pci_hotplug.h>
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include "../pci.h"
43 44
44#define MY_NAME "pci_hotplug" 45#define MY_NAME "pci_hotplug"
45 46
@@ -60,41 +61,7 @@ static int debug;
60////////////////////////////////////////////////////////////////// 61//////////////////////////////////////////////////////////////////
61 62
62static LIST_HEAD(pci_hotplug_slot_list); 63static LIST_HEAD(pci_hotplug_slot_list);
63 64static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock);
64struct kset *pci_hotplug_slots_kset;
65
66static ssize_t hotplug_slot_attr_show(struct kobject *kobj,
67 struct attribute *attr, char *buf)
68{
69 struct hotplug_slot *slot = to_hotplug_slot(kobj);
70 struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr);
71 return attribute->show ? attribute->show(slot, buf) : -EIO;
72}
73
74static ssize_t hotplug_slot_attr_store(struct kobject *kobj,
75 struct attribute *attr, const char *buf, size_t len)
76{
77 struct hotplug_slot *slot = to_hotplug_slot(kobj);
78 struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr);
79 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
80}
81
82static struct sysfs_ops hotplug_slot_sysfs_ops = {
83 .show = hotplug_slot_attr_show,
84 .store = hotplug_slot_attr_store,
85};
86
87static void hotplug_slot_release(struct kobject *kobj)
88{
89 struct hotplug_slot *slot = to_hotplug_slot(kobj);
90 if (slot->release)
91 slot->release(slot);
92}
93
94static struct kobj_type hotplug_slot_ktype = {
95 .sysfs_ops = &hotplug_slot_sysfs_ops,
96 .release = &hotplug_slot_release,
97};
98 65
99/* these strings match up with the values in pci_bus_speed */ 66/* these strings match up with the values in pci_bus_speed */
100static char *pci_bus_speed_strings[] = { 67static char *pci_bus_speed_strings[] = {
@@ -149,16 +116,15 @@ GET_STATUS(power_status, u8)
149GET_STATUS(attention_status, u8) 116GET_STATUS(attention_status, u8)
150GET_STATUS(latch_status, u8) 117GET_STATUS(latch_status, u8)
151GET_STATUS(adapter_status, u8) 118GET_STATUS(adapter_status, u8)
152GET_STATUS(address, u32)
153GET_STATUS(max_bus_speed, enum pci_bus_speed) 119GET_STATUS(max_bus_speed, enum pci_bus_speed)
154GET_STATUS(cur_bus_speed, enum pci_bus_speed) 120GET_STATUS(cur_bus_speed, enum pci_bus_speed)
155 121
156static ssize_t power_read_file (struct hotplug_slot *slot, char *buf) 122static ssize_t power_read_file(struct pci_slot *slot, char *buf)
157{ 123{
158 int retval; 124 int retval;
159 u8 value; 125 u8 value;
160 126
161 retval = get_power_status (slot, &value); 127 retval = get_power_status(slot->hotplug, &value);
162 if (retval) 128 if (retval)
163 goto exit; 129 goto exit;
164 retval = sprintf (buf, "%d\n", value); 130 retval = sprintf (buf, "%d\n", value);
@@ -166,9 +132,10 @@ exit:
166 return retval; 132 return retval;
167} 133}
168 134
169static ssize_t power_write_file (struct hotplug_slot *slot, const char *buf, 135static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
170 size_t count) 136 size_t count)
171{ 137{
138 struct hotplug_slot *slot = pci_slot->hotplug;
172 unsigned long lpower; 139 unsigned long lpower;
173 u8 power; 140 u8 power;
174 int retval = 0; 141 int retval = 0;
@@ -204,29 +171,30 @@ exit:
204 return count; 171 return count;
205} 172}
206 173
207static struct hotplug_slot_attribute hotplug_slot_attr_power = { 174static struct pci_slot_attribute hotplug_slot_attr_power = {
208 .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR}, 175 .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR},
209 .show = power_read_file, 176 .show = power_read_file,
210 .store = power_write_file 177 .store = power_write_file
211}; 178};
212 179
213static ssize_t attention_read_file (struct hotplug_slot *slot, char *buf) 180static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
214{ 181{
215 int retval; 182 int retval;
216 u8 value; 183 u8 value;
217 184
218 retval = get_attention_status (slot, &value); 185 retval = get_attention_status(slot->hotplug, &value);
219 if (retval) 186 if (retval)
220 goto exit; 187 goto exit;
221 retval = sprintf (buf, "%d\n", value); 188 retval = sprintf(buf, "%d\n", value);
222 189
223exit: 190exit:
224 return retval; 191 return retval;
225} 192}
226 193
227static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, 194static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
228 size_t count) 195 size_t count)
229{ 196{
197 struct hotplug_slot_ops *ops = slot->hotplug->ops;
230 unsigned long lattention; 198 unsigned long lattention;
231 u8 attention; 199 u8 attention;
232 int retval = 0; 200 int retval = 0;
@@ -235,13 +203,13 @@ static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf,
235 attention = (u8)(lattention & 0xff); 203 attention = (u8)(lattention & 0xff);
236 dbg (" - attention = %d\n", attention); 204 dbg (" - attention = %d\n", attention);
237 205
238 if (!try_module_get(slot->ops->owner)) { 206 if (!try_module_get(ops->owner)) {
239 retval = -ENODEV; 207 retval = -ENODEV;
240 goto exit; 208 goto exit;
241 } 209 }
242 if (slot->ops->set_attention_status) 210 if (ops->set_attention_status)
243 retval = slot->ops->set_attention_status(slot, attention); 211 retval = ops->set_attention_status(slot->hotplug, attention);
244 module_put(slot->ops->owner); 212 module_put(ops->owner);
245 213
246exit: 214exit:
247 if (retval) 215 if (retval)
@@ -249,18 +217,18 @@ exit:
249 return count; 217 return count;
250} 218}
251 219
252static struct hotplug_slot_attribute hotplug_slot_attr_attention = { 220static struct pci_slot_attribute hotplug_slot_attr_attention = {
253 .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR}, 221 .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR},
254 .show = attention_read_file, 222 .show = attention_read_file,
255 .store = attention_write_file 223 .store = attention_write_file
256}; 224};
257 225
258static ssize_t latch_read_file (struct hotplug_slot *slot, char *buf) 226static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
259{ 227{
260 int retval; 228 int retval;
261 u8 value; 229 u8 value;
262 230
263 retval = get_latch_status (slot, &value); 231 retval = get_latch_status(slot->hotplug, &value);
264 if (retval) 232 if (retval)
265 goto exit; 233 goto exit;
266 retval = sprintf (buf, "%d\n", value); 234 retval = sprintf (buf, "%d\n", value);
@@ -269,17 +237,17 @@ exit:
269 return retval; 237 return retval;
270} 238}
271 239
272static struct hotplug_slot_attribute hotplug_slot_attr_latch = { 240static struct pci_slot_attribute hotplug_slot_attr_latch = {
273 .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO}, 241 .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO},
274 .show = latch_read_file, 242 .show = latch_read_file,
275}; 243};
276 244
277static ssize_t presence_read_file (struct hotplug_slot *slot, char *buf) 245static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
278{ 246{
279 int retval; 247 int retval;
280 u8 value; 248 u8 value;
281 249
282 retval = get_adapter_status (slot, &value); 250 retval = get_adapter_status(slot->hotplug, &value);
283 if (retval) 251 if (retval)
284 goto exit; 252 goto exit;
285 retval = sprintf (buf, "%d\n", value); 253 retval = sprintf (buf, "%d\n", value);
@@ -288,42 +256,20 @@ exit:
288 return retval; 256 return retval;
289} 257}
290 258
291static struct hotplug_slot_attribute hotplug_slot_attr_presence = { 259static struct pci_slot_attribute hotplug_slot_attr_presence = {
292 .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO}, 260 .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO},
293 .show = presence_read_file, 261 .show = presence_read_file,
294}; 262};
295 263
296static ssize_t address_read_file (struct hotplug_slot *slot, char *buf)
297{
298 int retval;
299 u32 address;
300
301 retval = get_address (slot, &address);
302 if (retval)
303 goto exit;
304 retval = sprintf (buf, "%04x:%02x:%02x\n",
305 (address >> 16) & 0xffff,
306 (address >> 8) & 0xff,
307 address & 0xff);
308
309exit:
310 return retval;
311}
312
313static struct hotplug_slot_attribute hotplug_slot_attr_address = {
314 .attr = {.name = "address", .mode = S_IFREG | S_IRUGO},
315 .show = address_read_file,
316};
317
318static char *unknown_speed = "Unknown bus speed"; 264static char *unknown_speed = "Unknown bus speed";
319 265
320static ssize_t max_bus_speed_read_file (struct hotplug_slot *slot, char *buf) 266static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf)
321{ 267{
322 char *speed_string; 268 char *speed_string;
323 int retval; 269 int retval;
324 enum pci_bus_speed value; 270 enum pci_bus_speed value;
325 271
326 retval = get_max_bus_speed (slot, &value); 272 retval = get_max_bus_speed(slot->hotplug, &value);
327 if (retval) 273 if (retval)
328 goto exit; 274 goto exit;
329 275
@@ -338,18 +284,18 @@ exit:
338 return retval; 284 return retval;
339} 285}
340 286
341static struct hotplug_slot_attribute hotplug_slot_attr_max_bus_speed = { 287static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = {
342 .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, 288 .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO},
343 .show = max_bus_speed_read_file, 289 .show = max_bus_speed_read_file,
344}; 290};
345 291
346static ssize_t cur_bus_speed_read_file (struct hotplug_slot *slot, char *buf) 292static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf)
347{ 293{
348 char *speed_string; 294 char *speed_string;
349 int retval; 295 int retval;
350 enum pci_bus_speed value; 296 enum pci_bus_speed value;
351 297
352 retval = get_cur_bus_speed (slot, &value); 298 retval = get_cur_bus_speed(slot->hotplug, &value);
353 if (retval) 299 if (retval)
354 goto exit; 300 goto exit;
355 301
@@ -364,14 +310,15 @@ exit:
364 return retval; 310 return retval;
365} 311}
366 312
367static struct hotplug_slot_attribute hotplug_slot_attr_cur_bus_speed = { 313static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = {
368 .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, 314 .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO},
369 .show = cur_bus_speed_read_file, 315 .show = cur_bus_speed_read_file,
370}; 316};
371 317
372static ssize_t test_write_file (struct hotplug_slot *slot, const char *buf, 318static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
373 size_t count) 319 size_t count)
374{ 320{
321 struct hotplug_slot *slot = pci_slot->hotplug;
375 unsigned long ltest; 322 unsigned long ltest;
376 u32 test; 323 u32 test;
377 int retval = 0; 324 int retval = 0;
@@ -394,13 +341,14 @@ exit:
394 return count; 341 return count;
395} 342}
396 343
397static struct hotplug_slot_attribute hotplug_slot_attr_test = { 344static struct pci_slot_attribute hotplug_slot_attr_test = {
398 .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR}, 345 .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR},
399 .store = test_write_file 346 .store = test_write_file
400}; 347};
401 348
402static int has_power_file (struct hotplug_slot *slot) 349static int has_power_file(struct pci_slot *pci_slot)
403{ 350{
351 struct hotplug_slot *slot = pci_slot->hotplug;
404 if ((!slot) || (!slot->ops)) 352 if ((!slot) || (!slot->ops))
405 return -ENODEV; 353 return -ENODEV;
406 if ((slot->ops->enable_slot) || 354 if ((slot->ops->enable_slot) ||
@@ -410,8 +358,9 @@ static int has_power_file (struct hotplug_slot *slot)
410 return -ENOENT; 358 return -ENOENT;
411} 359}
412 360
413static int has_attention_file (struct hotplug_slot *slot) 361static int has_attention_file(struct pci_slot *pci_slot)
414{ 362{
363 struct hotplug_slot *slot = pci_slot->hotplug;
415 if ((!slot) || (!slot->ops)) 364 if ((!slot) || (!slot->ops))
416 return -ENODEV; 365 return -ENODEV;
417 if ((slot->ops->set_attention_status) || 366 if ((slot->ops->set_attention_status) ||
@@ -420,8 +369,9 @@ static int has_attention_file (struct hotplug_slot *slot)
420 return -ENOENT; 369 return -ENOENT;
421} 370}
422 371
423static int has_latch_file (struct hotplug_slot *slot) 372static int has_latch_file(struct pci_slot *pci_slot)
424{ 373{
374 struct hotplug_slot *slot = pci_slot->hotplug;
425 if ((!slot) || (!slot->ops)) 375 if ((!slot) || (!slot->ops))
426 return -ENODEV; 376 return -ENODEV;
427 if (slot->ops->get_latch_status) 377 if (slot->ops->get_latch_status)
@@ -429,8 +379,9 @@ static int has_latch_file (struct hotplug_slot *slot)
429 return -ENOENT; 379 return -ENOENT;
430} 380}
431 381
432static int has_adapter_file (struct hotplug_slot *slot) 382static int has_adapter_file(struct pci_slot *pci_slot)
433{ 383{
384 struct hotplug_slot *slot = pci_slot->hotplug;
434 if ((!slot) || (!slot->ops)) 385 if ((!slot) || (!slot->ops))
435 return -ENODEV; 386 return -ENODEV;
436 if (slot->ops->get_adapter_status) 387 if (slot->ops->get_adapter_status)
@@ -438,17 +389,9 @@ static int has_adapter_file (struct hotplug_slot *slot)
438 return -ENOENT; 389 return -ENOENT;
439} 390}
440 391
441static int has_address_file (struct hotplug_slot *slot) 392static int has_max_bus_speed_file(struct pci_slot *pci_slot)
442{
443 if ((!slot) || (!slot->ops))
444 return -ENODEV;
445 if (slot->ops->get_address)
446 return 0;
447 return -ENOENT;
448}
449
450static int has_max_bus_speed_file (struct hotplug_slot *slot)
451{ 393{
394 struct hotplug_slot *slot = pci_slot->hotplug;
452 if ((!slot) || (!slot->ops)) 395 if ((!slot) || (!slot->ops))
453 return -ENODEV; 396 return -ENODEV;
454 if (slot->ops->get_max_bus_speed) 397 if (slot->ops->get_max_bus_speed)
@@ -456,8 +399,9 @@ static int has_max_bus_speed_file (struct hotplug_slot *slot)
456 return -ENOENT; 399 return -ENOENT;
457} 400}
458 401
459static int has_cur_bus_speed_file (struct hotplug_slot *slot) 402static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
460{ 403{
404 struct hotplug_slot *slot = pci_slot->hotplug;
461 if ((!slot) || (!slot->ops)) 405 if ((!slot) || (!slot->ops))
462 return -ENODEV; 406 return -ENODEV;
463 if (slot->ops->get_cur_bus_speed) 407 if (slot->ops->get_cur_bus_speed)
@@ -465,8 +409,9 @@ static int has_cur_bus_speed_file (struct hotplug_slot *slot)
465 return -ENOENT; 409 return -ENOENT;
466} 410}
467 411
468static int has_test_file (struct hotplug_slot *slot) 412static int has_test_file(struct pci_slot *pci_slot)
469{ 413{
414 struct hotplug_slot *slot = pci_slot->hotplug;
470 if ((!slot) || (!slot->ops)) 415 if ((!slot) || (!slot->ops))
471 return -ENODEV; 416 return -ENODEV;
472 if (slot->ops->hardware_test) 417 if (slot->ops->hardware_test)
@@ -474,7 +419,7 @@ static int has_test_file (struct hotplug_slot *slot)
474 return -ENOENT; 419 return -ENOENT;
475} 420}
476 421
477static int fs_add_slot (struct hotplug_slot *slot) 422static int fs_add_slot(struct pci_slot *slot)
478{ 423{
479 int retval = 0; 424 int retval = 0;
480 425
@@ -505,13 +450,6 @@ static int fs_add_slot (struct hotplug_slot *slot)
505 goto exit_adapter; 450 goto exit_adapter;
506 } 451 }
507 452
508 if (has_address_file(slot) == 0) {
509 retval = sysfs_create_file(&slot->kobj,
510 &hotplug_slot_attr_address.attr);
511 if (retval)
512 goto exit_address;
513 }
514
515 if (has_max_bus_speed_file(slot) == 0) { 453 if (has_max_bus_speed_file(slot) == 0) {
516 retval = sysfs_create_file(&slot->kobj, 454 retval = sysfs_create_file(&slot->kobj,
517 &hotplug_slot_attr_max_bus_speed.attr); 455 &hotplug_slot_attr_max_bus_speed.attr);
@@ -544,10 +482,6 @@ exit_cur_speed:
544 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 482 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
545 483
546exit_max_speed: 484exit_max_speed:
547 if (has_address_file(slot) == 0)
548 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr);
549
550exit_address:
551 if (has_adapter_file(slot) == 0) 485 if (has_adapter_file(slot) == 0)
552 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 486 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
553 487
@@ -567,7 +501,7 @@ exit:
567 return retval; 501 return retval;
568} 502}
569 503
570static void fs_remove_slot (struct hotplug_slot *slot) 504static void fs_remove_slot(struct pci_slot *slot)
571{ 505{
572 if (has_power_file(slot) == 0) 506 if (has_power_file(slot) == 0)
573 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 507 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
@@ -581,9 +515,6 @@ static void fs_remove_slot (struct hotplug_slot *slot)
581 if (has_adapter_file(slot) == 0) 515 if (has_adapter_file(slot) == 0)
582 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 516 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
583 517
584 if (has_address_file(slot) == 0)
585 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr);
586
587 if (has_max_bus_speed_file(slot) == 0) 518 if (has_max_bus_speed_file(slot) == 0)
588 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 519 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
589 520
@@ -599,27 +530,33 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
599 struct hotplug_slot *slot; 530 struct hotplug_slot *slot;
600 struct list_head *tmp; 531 struct list_head *tmp;
601 532
533 spin_lock(&pci_hotplug_slot_list_lock);
602 list_for_each (tmp, &pci_hotplug_slot_list) { 534 list_for_each (tmp, &pci_hotplug_slot_list) {
603 slot = list_entry (tmp, struct hotplug_slot, slot_list); 535 slot = list_entry (tmp, struct hotplug_slot, slot_list);
604 if (strcmp(slot->name, name) == 0) 536 if (strcmp(slot->name, name) == 0)
605 return slot; 537 goto out;
606 } 538 }
607 return NULL; 539 slot = NULL;
540out:
541 spin_unlock(&pci_hotplug_slot_list_lock);
542 return slot;
608} 543}
609 544
610/** 545/**
611 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem 546 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
547 * @bus: bus this slot is on
612 * @slot: pointer to the &struct hotplug_slot to register 548 * @slot: pointer to the &struct hotplug_slot to register
549 * @slot_nr: slot number
613 * 550 *
614 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 551 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
 615 * userspace interaction with the slot. 552 * userspace interaction with the slot.
616 * 553 *
617 * Returns 0 if successful, anything else for an error. 554 * Returns 0 if successful, anything else for an error.
618 */ 555 */
619int pci_hp_register (struct hotplug_slot *slot) 556int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
620{ 557{
621 int result; 558 int result;
622 struct hotplug_slot *tmp; 559 struct pci_slot *pci_slot;
623 560
624 if (slot == NULL) 561 if (slot == NULL)
625 return -ENODEV; 562 return -ENODEV;
@@ -632,57 +569,89 @@ int pci_hp_register (struct hotplug_slot *slot)
632 } 569 }
633 570
634 /* Check if we have already registered a slot with the same name. */ 571 /* Check if we have already registered a slot with the same name. */
635 tmp = get_slot_from_name(slot->name); 572 if (get_slot_from_name(slot->name))
636 if (tmp)
637 return -EEXIST; 573 return -EEXIST;
638 574
639 slot->kobj.kset = pci_hotplug_slots_kset; 575 /*
640 result = kobject_init_and_add(&slot->kobj, &hotplug_slot_ktype, NULL, 576 * No problems if we call this interface from both ACPI_PCI_SLOT
641 "%s", slot->name); 577 * driver and call it here again. If we've already created the
642 if (result) { 578 * pci_slot, the interface will simply bump the refcount.
643 err("Unable to register kobject '%s'", slot->name); 579 */
644 return -EINVAL; 580 pci_slot = pci_create_slot(bus, slot_nr, slot->name);
581 if (IS_ERR(pci_slot))
582 return PTR_ERR(pci_slot);
583
584 if (pci_slot->hotplug) {
585 dbg("%s: already claimed\n", __func__);
586 pci_destroy_slot(pci_slot);
587 return -EBUSY;
645 } 588 }
646 589
647 list_add (&slot->slot_list, &pci_hotplug_slot_list); 590 slot->pci_slot = pci_slot;
591 pci_slot->hotplug = slot;
592
593 /*
594 * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
595 */
596 if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
597 result = kobject_rename(&pci_slot->kobj, slot->name);
598 if (result) {
599 pci_destroy_slot(pci_slot);
600 return result;
601 }
602 }
603
604 spin_lock(&pci_hotplug_slot_list_lock);
605 list_add(&slot->slot_list, &pci_hotplug_slot_list);
606 spin_unlock(&pci_hotplug_slot_list_lock);
607
608 result = fs_add_slot(pci_slot);
609 kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
610 dbg("Added slot %s to the list\n", slot->name);
611
648 612
649 result = fs_add_slot (slot);
650 kobject_uevent(&slot->kobj, KOBJ_ADD);
651 dbg ("Added slot %s to the list\n", slot->name);
652 return result; 613 return result;
653} 614}
654 615
655/** 616/**
656 * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem 617 * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
657 * @slot: pointer to the &struct hotplug_slot to deregister 618 * @hotplug: pointer to the &struct hotplug_slot to deregister
658 * 619 *
659 * The @slot must have been registered with the pci hotplug subsystem 620 * The @slot must have been registered with the pci hotplug subsystem
660 * previously with a call to pci_hp_register(). 621 * previously with a call to pci_hp_register().
661 * 622 *
662 * Returns 0 if successful, anything else for an error. 623 * Returns 0 if successful, anything else for an error.
663 */ 624 */
664int pci_hp_deregister (struct hotplug_slot *slot) 625int pci_hp_deregister(struct hotplug_slot *hotplug)
665{ 626{
666 struct hotplug_slot *temp; 627 struct hotplug_slot *temp;
628 struct pci_slot *slot;
667 629
668 if (slot == NULL) 630 if (!hotplug)
669 return -ENODEV; 631 return -ENODEV;
670 632
671 temp = get_slot_from_name (slot->name); 633 temp = get_slot_from_name(hotplug->name);
672 if (temp != slot) { 634 if (temp != hotplug)
673 return -ENODEV; 635 return -ENODEV;
674 }
675 list_del (&slot->slot_list);
676 636
677 fs_remove_slot (slot); 637 spin_lock(&pci_hotplug_slot_list_lock);
678 dbg ("Removed slot %s from the list\n", slot->name); 638 list_del(&hotplug->slot_list);
679 kobject_put(&slot->kobj); 639 spin_unlock(&pci_hotplug_slot_list_lock);
640
641 slot = hotplug->pci_slot;
642 fs_remove_slot(slot);
643 dbg("Removed slot %s from the list\n", hotplug->name);
644
645 hotplug->release(hotplug);
646 slot->hotplug = NULL;
647 pci_destroy_slot(slot);
648
680 return 0; 649 return 0;
681} 650}
682 651
683/** 652/**
684 * pci_hp_change_slot_info - changes the slot's information structure in the core 653 * pci_hp_change_slot_info - changes the slot's information structure in the core
685 * @slot: pointer to the slot whose info has changed 654 * @hotplug: pointer to the slot whose info has changed
 686 * @info: pointer to the info to copy into the slot's info structure 655 * @info: pointer to the info to copy into the slot's info structure
687 * 656 *
688 * @slot must have been registered with the pci 657 * @slot must have been registered with the pci
@@ -690,13 +659,15 @@ int pci_hp_deregister (struct hotplug_slot *slot)
690 * 659 *
691 * Returns 0 if successful, anything else for an error. 660 * Returns 0 if successful, anything else for an error.
692 */ 661 */
693int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, 662int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug,
694 struct hotplug_slot_info *info) 663 struct hotplug_slot_info *info)
695{ 664{
696 if ((slot == NULL) || (info == NULL)) 665 struct pci_slot *slot;
666 if (!hotplug || !info)
697 return -ENODEV; 667 return -ENODEV;
668 slot = hotplug->pci_slot;
698 669
699 memcpy (slot->info, info, sizeof (struct hotplug_slot_info)); 670 memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
700 671
701 return 0; 672 return 0;
702} 673}
@@ -704,36 +675,22 @@ int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
704static int __init pci_hotplug_init (void) 675static int __init pci_hotplug_init (void)
705{ 676{
706 int result; 677 int result;
707 struct kset *pci_bus_kset;
708 678
709 pci_bus_kset = bus_get_kset(&pci_bus_type);
710
711 pci_hotplug_slots_kset = kset_create_and_add("slots", NULL,
712 &pci_bus_kset->kobj);
713 if (!pci_hotplug_slots_kset) {
714 result = -ENOMEM;
715 err("Register subsys error\n");
716 goto exit;
717 }
718 result = cpci_hotplug_init(debug); 679 result = cpci_hotplug_init(debug);
719 if (result) { 680 if (result) {
720 err ("cpci_hotplug_init with error %d\n", result); 681 err ("cpci_hotplug_init with error %d\n", result);
721 goto err_subsys; 682 goto err_cpci;
722 } 683 }
723 684
724 info (DRIVER_DESC " version: " DRIVER_VERSION "\n"); 685 info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
725 goto exit;
726 686
727err_subsys: 687err_cpci:
728 kset_unregister(pci_hotplug_slots_kset);
729exit:
730 return result; 688 return result;
731} 689}
732 690
733static void __exit pci_hotplug_exit (void) 691static void __exit pci_hotplug_exit (void)
734{ 692{
735 cpci_hotplug_exit(); 693 cpci_hotplug_exit();
736 kset_unregister(pci_hotplug_slots_kset);
737} 694}
738 695
739module_init(pci_hotplug_init); 696module_init(pci_hotplug_init);
@@ -745,7 +702,6 @@ MODULE_LICENSE("GPL");
745module_param(debug, bool, 0644); 702module_param(debug, bool, 0644);
746MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 703MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
747 704
748EXPORT_SYMBOL_GPL(pci_hotplug_slots_kset);
749EXPORT_SYMBOL_GPL(pci_hp_register); 705EXPORT_SYMBOL_GPL(pci_hp_register);
750EXPORT_SYMBOL_GPL(pci_hp_deregister); 706EXPORT_SYMBOL_GPL(pci_hp_deregister);
751EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); 707EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
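
With the interface change above, every hotplug_slot is now backed by a struct pci_slot owned by the PCI core and identified by the (bus, slot number) pair given at registration time. A minimal sketch of a driver using the updated interface; the example_* names are invented for illustration:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static char example_name[] = "example0";

static struct hotplug_slot_ops example_ops = {
	.owner = THIS_MODULE,
	/* .enable_slot, .disable_slot, .get_*_status, ... as needed */
};

static void example_release(struct hotplug_slot *slot)
{
	/* invoked by the core from pci_hp_deregister() */
	kfree(slot->info);
	kfree(slot);
}

static int example_register_slot(struct pci_bus *bus, int slot_nr)
{
	struct hotplug_slot *slot;
	int retval;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;
	slot->info = kzalloc(sizeof(*slot->info), GFP_KERNEL);
	if (!slot->info) {
		kfree(slot);
		return -ENOMEM;
	}
	slot->name = example_name;	/* may rename the pci_slot created by the core */
	slot->ops = &example_ops;
	slot->release = example_release;

	/* -EBUSY now means another hotplug driver already claimed this slot. */
	retval = pci_hp_register(slot, bus, slot_nr);
	if (retval) {
		kfree(slot->info);
		kfree(slot);
	}
	return retval;
}
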
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 79c9ddaad3fb..e3a1e7e7dba2 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -43,6 +43,7 @@ extern int pciehp_poll_mode;
43extern int pciehp_poll_time; 43extern int pciehp_poll_time;
44extern int pciehp_debug; 44extern int pciehp_debug;
45extern int pciehp_force; 45extern int pciehp_force;
46extern int pciehp_slot_with_bus;
46extern struct workqueue_struct *pciehp_wq; 47extern struct workqueue_struct *pciehp_wq;
47 48
48#define dbg(format, arg...) \ 49#define dbg(format, arg...) \
@@ -96,7 +97,7 @@ struct controller {
96 u32 slot_cap; 97 u32 slot_cap;
97 u8 cap_base; 98 u8 cap_base;
98 struct timer_list poll_timer; 99 struct timer_list poll_timer;
99 volatile int cmd_busy; 100 int cmd_busy;
100 unsigned int no_cmd_complete:1; 101 unsigned int no_cmd_complete:1;
101}; 102};
102 103
@@ -156,10 +157,10 @@ extern u8 pciehp_handle_power_fault(struct slot *p_slot);
156extern int pciehp_configure_device(struct slot *p_slot); 157extern int pciehp_configure_device(struct slot *p_slot);
157extern int pciehp_unconfigure_device(struct slot *p_slot); 158extern int pciehp_unconfigure_device(struct slot *p_slot);
158extern void pciehp_queue_pushbutton_work(struct work_struct *work); 159extern void pciehp_queue_pushbutton_work(struct work_struct *work);
159int pcie_init(struct controller *ctrl, struct pcie_device *dev); 160struct controller *pcie_init(struct pcie_device *dev);
160int pciehp_enable_slot(struct slot *p_slot); 161int pciehp_enable_slot(struct slot *p_slot);
161int pciehp_disable_slot(struct slot *p_slot); 162int pciehp_disable_slot(struct slot *p_slot);
162int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev); 163int pcie_enable_notification(struct controller *ctrl);
163 164
164static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 165static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
165{ 166{
@@ -202,8 +203,13 @@ struct hpc_ops {
202#include <acpi/actypes.h> 203#include <acpi/actypes.h>
203#include <linux/pci-acpi.h> 204#include <linux/pci-acpi.h>
204 205
205#define pciehp_get_hp_hw_control_from_firmware(dev) \ 206static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
206 pciehp_acpi_get_hp_hw_control_from_firmware(dev) 207{
208 u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
209 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
210 return acpi_get_hp_hw_control_from_firmware(dev, flags);
211}
212
207static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, 213static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
208 struct hotplug_params *hpp) 214 struct hotplug_params *hpp)
209{ 215{
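
Because the firmware check is now a real function with a return value, a port service driver can refuse to bind when _OSC/OSHP does not hand over control. Sketch of the intended call pattern (it mirrors the pciehp_probe() hunk later in this patch; pdev is the port's pci_dev):

	if (pciehp_force)
		dbg("Bypassing BIOS check for pciehp use on %s\n",
		    pci_name(pdev));
	else if (pciehp_get_hp_hw_control_from_firmware(pdev))
		return -ENODEV;	/* firmware kept native hotplug control */
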
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 48a2ed378914..3677495c4f91 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -72,7 +72,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
72static int get_attention_status (struct hotplug_slot *slot, u8 *value); 72static int get_attention_status (struct hotplug_slot *slot, u8 *value);
73static int get_latch_status (struct hotplug_slot *slot, u8 *value); 73static int get_latch_status (struct hotplug_slot *slot, u8 *value);
74static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 74static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
75static int get_address (struct hotplug_slot *slot, u32 *value);
76static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 75static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
77static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 76static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
78 77
@@ -85,7 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
85 .get_attention_status = get_attention_status, 84 .get_attention_status = get_attention_status,
86 .get_latch_status = get_latch_status, 85 .get_latch_status = get_latch_status,
87 .get_adapter_status = get_adapter_status, 86 .get_adapter_status = get_adapter_status,
88 .get_address = get_address,
89 .get_max_bus_speed = get_max_bus_speed, 87 .get_max_bus_speed = get_max_bus_speed,
90 .get_cur_bus_speed = get_cur_bus_speed, 88 .get_cur_bus_speed = get_cur_bus_speed,
91}; 89};
@@ -185,23 +183,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
185 */ 183 */
186static void release_slot(struct hotplug_slot *hotplug_slot) 184static void release_slot(struct hotplug_slot *hotplug_slot)
187{ 185{
188 struct slot *slot = hotplug_slot->private;
189
190 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 186 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
191 187
192 kfree(slot->hotplug_slot->info); 188 kfree(hotplug_slot->info);
193 kfree(slot->hotplug_slot); 189 kfree(hotplug_slot);
194 kfree(slot);
195}
196
197static void make_slot_name(struct slot *slot)
198{
199 if (pciehp_slot_with_bus)
200 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
201 slot->bus, slot->number);
202 else
203 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
204 slot->number);
205} 190}
206 191
207static int init_slots(struct controller *ctrl) 192static int init_slots(struct controller *ctrl)
@@ -210,49 +195,34 @@ static int init_slots(struct controller *ctrl)
210 struct hotplug_slot *hotplug_slot; 195 struct hotplug_slot *hotplug_slot;
211 struct hotplug_slot_info *info; 196 struct hotplug_slot_info *info;
212 int retval = -ENOMEM; 197 int retval = -ENOMEM;
213 int i;
214
215 for (i = 0; i < ctrl->num_slots; i++) {
216 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
217 if (!slot)
218 goto error;
219 198
199 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
220 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); 200 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
221 if (!hotplug_slot) 201 if (!hotplug_slot)
222 goto error_slot; 202 goto error;
223 slot->hotplug_slot = hotplug_slot;
224 203
225 info = kzalloc(sizeof(*info), GFP_KERNEL); 204 info = kzalloc(sizeof(*info), GFP_KERNEL);
226 if (!info) 205 if (!info)
227 goto error_hpslot; 206 goto error_hpslot;
228 hotplug_slot->info = info;
229
230 hotplug_slot->name = slot->name;
231
232 slot->hp_slot = i;
233 slot->ctrl = ctrl;
234 slot->bus = ctrl->pci_dev->subordinate->number;
235 slot->device = ctrl->slot_device_offset + i;
236 slot->hpc_ops = ctrl->hpc_ops;
237 slot->number = ctrl->first_slot;
238 mutex_init(&slot->lock);
239 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
240 207
241 /* register this slot with the hotplug pci core */ 208 /* register this slot with the hotplug pci core */
209 hotplug_slot->info = info;
210 hotplug_slot->name = slot->name;
242 hotplug_slot->private = slot; 211 hotplug_slot->private = slot;
243 hotplug_slot->release = &release_slot; 212 hotplug_slot->release = &release_slot;
244 make_slot_name(slot);
245 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 213 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
246
247 get_power_status(hotplug_slot, &info->power_status); 214 get_power_status(hotplug_slot, &info->power_status);
248 get_attention_status(hotplug_slot, &info->attention_status); 215 get_attention_status(hotplug_slot, &info->attention_status);
249 get_latch_status(hotplug_slot, &info->latch_status); 216 get_latch_status(hotplug_slot, &info->latch_status);
250 get_adapter_status(hotplug_slot, &info->adapter_status); 217 get_adapter_status(hotplug_slot, &info->adapter_status);
218 slot->hotplug_slot = hotplug_slot;
251 219
252 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 220 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
253 "slot_device_offset=%x\n", slot->bus, slot->device, 221 "slot_device_offset=%x\n", slot->bus, slot->device,
254 slot->hp_slot, slot->number, ctrl->slot_device_offset); 222 slot->hp_slot, slot->number, ctrl->slot_device_offset);
255 retval = pci_hp_register(hotplug_slot); 223 retval = pci_hp_register(hotplug_slot,
224 ctrl->pci_dev->subordinate,
225 slot->device);
256 if (retval) { 226 if (retval) {
257 err("pci_hp_register failed with error %d\n", retval); 227 err("pci_hp_register failed with error %d\n", retval);
258 if (retval == -EEXIST) 228 if (retval == -EEXIST)
@@ -263,7 +233,7 @@ static int init_slots(struct controller *ctrl)
263 } 233 }
264 /* create additional sysfs entries */ 234 /* create additional sysfs entries */
265 if (EMI(ctrl)) { 235 if (EMI(ctrl)) {
266 retval = sysfs_create_file(&hotplug_slot->kobj, 236 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
267 &hotplug_slot_attr_lock.attr); 237 &hotplug_slot_attr_lock.attr);
268 if (retval) { 238 if (retval) {
269 pci_hp_deregister(hotplug_slot); 239 pci_hp_deregister(hotplug_slot);
@@ -271,8 +241,6 @@ static int init_slots(struct controller *ctrl)
271 goto error_info; 241 goto error_info;
272 } 242 }
273 } 243 }
274
275 list_add(&slot->slot_list, &ctrl->slot_list);
276 } 244 }
277 245
278 return 0; 246 return 0;
@@ -280,27 +248,18 @@ error_info:
280 kfree(info); 248 kfree(info);
281error_hpslot: 249error_hpslot:
282 kfree(hotplug_slot); 250 kfree(hotplug_slot);
283error_slot:
284 kfree(slot);
285error: 251error:
286 return retval; 252 return retval;
287} 253}
288 254
289static void cleanup_slots(struct controller *ctrl) 255static void cleanup_slots(struct controller *ctrl)
290{ 256{
291 struct list_head *tmp;
292 struct list_head *next;
293 struct slot *slot; 257 struct slot *slot;
294 258
295 list_for_each_safe(tmp, next, &ctrl->slot_list) { 259 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
296 slot = list_entry(tmp, struct slot, slot_list);
297 list_del(&slot->slot_list);
298 if (EMI(ctrl)) 260 if (EMI(ctrl))
299 sysfs_remove_file(&slot->hotplug_slot->kobj, 261 sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
300 &hotplug_slot_attr_lock.attr); 262 &hotplug_slot_attr_lock.attr);
301 cancel_delayed_work(&slot->work);
302 flush_scheduled_work();
303 flush_workqueue(pciehp_wq);
304 pci_hp_deregister(slot->hotplug_slot); 263 pci_hp_deregister(slot->hotplug_slot);
305 } 264 }
306} 265}
@@ -398,19 +357,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
398 return 0; 357 return 0;
399} 358}
400 359
401static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) 360static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
402{ 361 enum pci_bus_speed *value)
403 struct slot *slot = hotplug_slot->private;
404 struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
405
406 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
407
408 *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
409
410 return 0;
411}
412
413static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
414{ 362{
415 struct slot *slot = hotplug_slot->private; 363 struct slot *slot = hotplug_slot->private;
416 int retval; 364 int retval;
@@ -444,34 +392,30 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
444 struct controller *ctrl; 392 struct controller *ctrl;
445 struct slot *t_slot; 393 struct slot *t_slot;
446 u8 value; 394 u8 value;
447 struct pci_dev *pdev; 395 struct pci_dev *pdev = dev->port;
448 396
449 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 397 if (pciehp_force)
450 if (!ctrl) { 398 dbg("Bypassing BIOS check for pciehp use on %s\n",
451 err("%s : out of memory\n", __func__); 399 pci_name(pdev));
400 else if (pciehp_get_hp_hw_control_from_firmware(pdev))
452 goto err_out_none; 401 goto err_out_none;
453 }
454 INIT_LIST_HEAD(&ctrl->slot_list);
455
456 pdev = dev->port;
457 ctrl->pci_dev = pdev;
458 402
459 rc = pcie_init(ctrl, dev); 403 ctrl = pcie_init(dev);
460 if (rc) { 404 if (!ctrl) {
461 dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); 405 dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
462 goto err_out_free_ctrl; 406 goto err_out_none;
463 } 407 }
464 408 set_service_data(dev, ctrl);
465 pci_set_drvdata(pdev, ctrl);
466
467 dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n",
468 __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
469 PCI_FUNC(pdev->devfn), pdev->irq);
470 409
471 /* Setup the slot information structures */ 410 /* Setup the slot information structures */
472 rc = init_slots(ctrl); 411 rc = init_slots(ctrl);
473 if (rc) { 412 if (rc) {
474 err("%s: slot initialization failed\n", PCIE_MODULE_NAME); 413 if (rc == -EBUSY)
414 warn("%s: slot already registered by another "
415 "hotplug driver\n", PCIE_MODULE_NAME);
416 else
417 err("%s: slot initialization failed\n",
418 PCIE_MODULE_NAME);
475 goto err_out_release_ctlr; 419 goto err_out_release_ctlr;
476 } 420 }
477 421
@@ -495,20 +439,16 @@ err_out_free_ctrl_slot:
495 cleanup_slots(ctrl); 439 cleanup_slots(ctrl);
496err_out_release_ctlr: 440err_out_release_ctlr:
497 ctrl->hpc_ops->release_ctlr(ctrl); 441 ctrl->hpc_ops->release_ctlr(ctrl);
498err_out_free_ctrl:
499 kfree(ctrl);
500err_out_none: 442err_out_none:
501 return -ENODEV; 443 return -ENODEV;
502} 444}
503 445
504static void pciehp_remove (struct pcie_device *dev) 446static void pciehp_remove (struct pcie_device *dev)
505{ 447{
506 struct pci_dev *pdev = dev->port; 448 struct controller *ctrl = get_service_data(dev);
507 struct controller *ctrl = pci_get_drvdata(pdev);
508 449
509 cleanup_slots(ctrl); 450 cleanup_slots(ctrl);
510 ctrl->hpc_ops->release_ctlr(ctrl); 451 ctrl->hpc_ops->release_ctlr(ctrl);
511 kfree(ctrl);
512} 452}
513 453
514#ifdef CONFIG_PM 454#ifdef CONFIG_PM
@@ -522,13 +462,12 @@ static int pciehp_resume (struct pcie_device *dev)
522{ 462{
523 printk("%s ENTRY\n", __func__); 463 printk("%s ENTRY\n", __func__);
524 if (pciehp_force) { 464 if (pciehp_force) {
525 struct pci_dev *pdev = dev->port; 465 struct controller *ctrl = get_service_data(dev);
526 struct controller *ctrl = pci_get_drvdata(pdev);
527 struct slot *t_slot; 466 struct slot *t_slot;
528 u8 status; 467 u8 status;
529 468
530 /* reinitialize the chipset's event detection logic */ 469 /* reinitialize the chipset's event detection logic */
531 pcie_init_hardware_part2(ctrl, dev); 470 pcie_enable_notification(ctrl);
532 471
533 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 472 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
534 473
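
Two conventions change in this file: pci_hp_register() now takes the parent bus and device number so the PCI core can create the shared slot object, and driver-specific sysfs attributes attach to that pci_slot's kobject. Condensed sketch using the names from the hunks above (error handling trimmed):

	retval = pci_hp_register(hotplug_slot,
				 ctrl->pci_dev->subordinate, slot->device);
	if (retval)
		return retval;	/* -EEXIST if the slot is already claimed */

	/* extra attributes now live under /sys/bus/pci/slots/<name>/ */
	retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
				   &hotplug_slot_attr_lock.attr);
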
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 79f104963166..1323a43285d7 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -247,30 +247,32 @@ static inline void pciehp_free_irq(struct controller *ctrl)
247 free_irq(ctrl->pci_dev->irq, ctrl); 247 free_irq(ctrl->pci_dev->irq, ctrl);
248} 248}
249 249
250static inline int pcie_poll_cmd(struct controller *ctrl) 250static int pcie_poll_cmd(struct controller *ctrl)
251{ 251{
252 u16 slot_status; 252 u16 slot_status;
253 int timeout = 1000; 253 int timeout = 1000;
254 254
255 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) 255 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
256 if (slot_status & CMD_COMPLETED) 256 if (slot_status & CMD_COMPLETED) {
257 goto completed; 257 pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
258 for (timeout = 1000; timeout > 0; timeout -= 100) { 258 return 1;
259 msleep(100); 259 }
260 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) 260 }
261 if (slot_status & CMD_COMPLETED) 261 while (timeout > 1000) {
262 goto completed; 262 msleep(10);
263 timeout -= 10;
264 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
265 if (slot_status & CMD_COMPLETED) {
266 pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
267 return 1;
268 }
269 }
263 } 270 }
264 return 0; /* timeout */ 271 return 0; /* timeout */
265
266completed:
267 pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
268 return timeout;
269} 272}
270 273
271static inline int pcie_wait_cmd(struct controller *ctrl, int poll) 274static void pcie_wait_cmd(struct controller *ctrl, int poll)
272{ 275{
273 int retval = 0;
274 unsigned int msecs = pciehp_poll_mode ? 2500 : 1000; 276 unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
275 unsigned long timeout = msecs_to_jiffies(msecs); 277 unsigned long timeout = msecs_to_jiffies(msecs);
276 int rc; 278 int rc;
@@ -278,16 +280,9 @@ static inline int pcie_wait_cmd(struct controller *ctrl, int poll)
278 if (poll) 280 if (poll)
279 rc = pcie_poll_cmd(ctrl); 281 rc = pcie_poll_cmd(ctrl);
280 else 282 else
281 rc = wait_event_interruptible_timeout(ctrl->queue, 283 rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
282 !ctrl->cmd_busy, timeout);
283 if (!rc) 284 if (!rc)
284 dbg("Command not completed in 1000 msec\n"); 285 dbg("Command not completed in 1000 msec\n");
285 else if (rc < 0) {
286 retval = -EINTR;
287 info("Command was interrupted by a signal\n");
288 }
289
290 return retval;
291} 286}
292 287
293/** 288/**
@@ -342,10 +337,6 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
342 337
343 slot_ctrl &= ~mask; 338 slot_ctrl &= ~mask;
344 slot_ctrl |= (cmd & mask); 339 slot_ctrl |= (cmd & mask);
345 /* Don't enable command completed if caller is changing it. */
346 if (!(mask & CMD_CMPL_INTR_ENABLE))
347 slot_ctrl |= CMD_CMPL_INTR_ENABLE;
348
349 ctrl->cmd_busy = 1; 340 ctrl->cmd_busy = 1;
350 smp_mb(); 341 smp_mb();
351 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 342 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
@@ -365,7 +356,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
365 if (!(slot_ctrl & HP_INTR_ENABLE) || 356 if (!(slot_ctrl & HP_INTR_ENABLE) ||
366 !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) 357 !(slot_ctrl & CMD_CMPL_INTR_ENABLE))
367 poll = 1; 358 poll = 1;
368 retval = pcie_wait_cmd(ctrl, poll); 359 pcie_wait_cmd(ctrl, poll);
369 } 360 }
370 out: 361 out:
371 mutex_unlock(&ctrl->ctrl_lock); 362 mutex_unlock(&ctrl->ctrl_lock);
@@ -614,23 +605,6 @@ static void hpc_set_green_led_blink(struct slot *slot)
614 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 605 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
615} 606}
616 607
617static void hpc_release_ctlr(struct controller *ctrl)
618{
619 /* Mask Hot-plug Interrupt Enable */
620 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE))
621 err("%s: Cannot mask hotplut interrupt enable\n", __func__);
622
623 /* Free interrupt handler or interrupt polling timer */
624 pciehp_free_irq(ctrl);
625
626 /*
627 * If this is the last controller to be released, destroy the
628 * pciehp work queue
629 */
630 if (atomic_dec_and_test(&pciehp_num_controllers))
631 destroy_workqueue(pciehp_wq);
632}
633
634static int hpc_power_on_slot(struct slot * slot) 608static int hpc_power_on_slot(struct slot * slot)
635{ 609{
636 struct controller *ctrl = slot->ctrl; 610 struct controller *ctrl = slot->ctrl;
@@ -785,7 +759,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
785 intr_loc |= detected; 759 intr_loc |= detected;
786 if (!intr_loc) 760 if (!intr_loc)
787 return IRQ_NONE; 761 return IRQ_NONE;
788 if (pciehp_writew(ctrl, SLOTSTATUS, detected)) { 762 if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
789 err("%s: Cannot write to SLOTSTATUS\n", __func__); 763 err("%s: Cannot write to SLOTSTATUS\n", __func__);
790 return IRQ_NONE; 764 return IRQ_NONE;
791 } 765 }
@@ -797,25 +771,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
797 if (intr_loc & CMD_COMPLETED) { 771 if (intr_loc & CMD_COMPLETED) {
798 ctrl->cmd_busy = 0; 772 ctrl->cmd_busy = 0;
799 smp_mb(); 773 smp_mb();
800 wake_up_interruptible(&ctrl->queue); 774 wake_up(&ctrl->queue);
801 } 775 }
802 776
803 if (!(intr_loc & ~CMD_COMPLETED)) 777 if (!(intr_loc & ~CMD_COMPLETED))
804 return IRQ_HANDLED; 778 return IRQ_HANDLED;
805 779
806 /*
807 * Return without handling events if this handler routine is
808 * called before controller initialization is done. This may
809 * happen if hotplug event or another interrupt that shares
810 * the IRQ with pciehp arrives before slot initialization is
811 * done after interrupt handler is registered.
812 *
813 * FIXME - Need more structural fixes. We need to be ready to
814 * handle the event before installing interrupt handler.
815 */
816 p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 780 p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
817 if (!p_slot || !p_slot->hpc_ops)
818 return IRQ_HANDLED;
819 781
820 /* Check MRL Sensor Changed */ 782 /* Check MRL Sensor Changed */
821 if (intr_loc & MRL_SENS_CHANGED) 783 if (intr_loc & MRL_SENS_CHANGED)
@@ -992,6 +954,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
992 return retval; 954 return retval;
993} 955}
994 956
957static void pcie_release_ctrl(struct controller *ctrl);
995static struct hpc_ops pciehp_hpc_ops = { 958static struct hpc_ops pciehp_hpc_ops = {
996 .power_on_slot = hpc_power_on_slot, 959 .power_on_slot = hpc_power_on_slot,
997 .power_off_slot = hpc_power_off_slot, 960 .power_off_slot = hpc_power_off_slot,
@@ -1013,97 +976,11 @@ static struct hpc_ops pciehp_hpc_ops = {
1013 .green_led_off = hpc_set_green_led_off, 976 .green_led_off = hpc_set_green_led_off,
1014 .green_led_blink = hpc_set_green_led_blink, 977 .green_led_blink = hpc_set_green_led_blink,
1015 978
1016 .release_ctlr = hpc_release_ctlr, 979 .release_ctlr = pcie_release_ctrl,
1017 .check_lnk_status = hpc_check_lnk_status, 980 .check_lnk_status = hpc_check_lnk_status,
1018}; 981};
1019 982
1020#ifdef CONFIG_ACPI 983int pcie_enable_notification(struct controller *ctrl)
1021static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1022{
1023 acpi_status status;
1024 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
1025 struct pci_dev *pdev = dev;
1026 struct pci_bus *parent;
1027 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
1028
1029 /*
1030 * Per PCI firmware specification, we should run the ACPI _OSC
1031 * method to get control of hotplug hardware before using it.
1032 * If an _OSC is missing, we look for an OSHP to do the same thing.
1033 * To handle different BIOS behavior, we look for _OSC and OSHP
1034 * within the scope of the hotplug controller and its parents, upto
1035 * the host bridge under which this controller exists.
1036 */
1037 while (!handle) {
1038 /*
1039 * This hotplug controller was not listed in the ACPI name
1040 * space at all. Try to get acpi handle of parent pci bus.
1041 */
1042 if (!pdev || !pdev->bus->parent)
1043 break;
1044 parent = pdev->bus->parent;
1045 dbg("Could not find %s in acpi namespace, trying parent\n",
1046 pci_name(pdev));
1047 if (!parent->self)
1048 /* Parent must be a host bridge */
1049 handle = acpi_get_pci_rootbridge_handle(
1050 pci_domain_nr(parent),
1051 parent->number);
1052 else
1053 handle = DEVICE_ACPI_HANDLE(
1054 &(parent->self->dev));
1055 pdev = parent->self;
1056 }
1057
1058 while (handle) {
1059 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
1060 dbg("Trying to get hotplug control for %s \n",
1061 (char *)string.pointer);
1062 status = pci_osc_control_set(handle,
1063 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
1064 OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
1065 if (status == AE_NOT_FOUND)
1066 status = acpi_run_oshp(handle);
1067 if (ACPI_SUCCESS(status)) {
1068 dbg("Gained control for hotplug HW for pci %s (%s)\n",
1069 pci_name(dev), (char *)string.pointer);
1070 kfree(string.pointer);
1071 return 0;
1072 }
1073 if (acpi_root_bridge(handle))
1074 break;
1075 chandle = handle;
1076 status = acpi_get_parent(chandle, &handle);
1077 if (ACPI_FAILURE(status))
1078 break;
1079 }
1080
1081 dbg("Cannot get control of hotplug hardware for pci %s\n",
1082 pci_name(dev));
1083
1084 kfree(string.pointer);
1085 return -1;
1086}
1087#endif
1088
1089static int pcie_init_hardware_part1(struct controller *ctrl,
1090 struct pcie_device *dev)
1091{
1092 /* Clear all remaining event bits in Slot Status register */
1093 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) {
1094 err("%s: Cannot write to SLOTSTATUS register\n", __func__);
1095 return -1;
1096 }
1097
1098 /* Mask Hot-plug Interrupt Enable */
1099 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) {
1100 err("%s: Cannot mask hotplug interrupt enable\n", __func__);
1101 return -1;
1102 }
1103 return 0;
1104}
1105
1106int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
1107{ 984{
1108 u16 cmd, mask; 985 u16 cmd, mask;
1109 986
@@ -1115,30 +992,83 @@ int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
1115 if (MRL_SENS(ctrl)) 992 if (MRL_SENS(ctrl))
1116 cmd |= MRL_DETECT_ENABLE; 993 cmd |= MRL_DETECT_ENABLE;
1117 if (!pciehp_poll_mode) 994 if (!pciehp_poll_mode)
1118 cmd |= HP_INTR_ENABLE; 995 cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1119 996
1120 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | 997 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1121 PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE; 998 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1122 999
1123 if (pcie_write_cmd(ctrl, cmd, mask)) { 1000 if (pcie_write_cmd(ctrl, cmd, mask)) {
1124 err("%s: Cannot enable software notification\n", __func__); 1001 err("%s: Cannot enable software notification\n", __func__);
1125 goto abort; 1002 return -1;
1126 } 1003 }
1004 return 0;
1005}
1127 1006
1128 if (pciehp_force) 1007static void pcie_disable_notification(struct controller *ctrl)
1129 dbg("Bypassing BIOS check for pciehp use on %s\n", 1008{
1130 pci_name(ctrl->pci_dev)); 1009 u16 mask;
1131 else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev)) 1010 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1132 goto abort_disable_intr; 1011 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1012 if (pcie_write_cmd(ctrl, 0, mask))
1013 warn("%s: Cannot disable software notification\n", __func__);
1014}
1133 1015
1016static int pcie_init_notification(struct controller *ctrl)
1017{
1018 if (pciehp_request_irq(ctrl))
1019 return -1;
1020 if (pcie_enable_notification(ctrl)) {
1021 pciehp_free_irq(ctrl);
1022 return -1;
1023 }
1134 return 0; 1024 return 0;
1025}
1135 1026
1136 /* We end up here for the many possible ways to fail this API. */ 1027static void pcie_shutdown_notification(struct controller *ctrl)
1137abort_disable_intr: 1028{
1138 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE)) 1029 pcie_disable_notification(ctrl);
1139 err("%s : disabling interrupts failed\n", __func__); 1030 pciehp_free_irq(ctrl);
1140abort: 1031}
1141 return -1; 1032
1033static void make_slot_name(struct slot *slot)
1034{
1035 if (pciehp_slot_with_bus)
1036 snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
1037 slot->bus, slot->number);
1038 else
1039 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1040}
1041
1042static int pcie_init_slot(struct controller *ctrl)
1043{
1044 struct slot *slot;
1045
1046 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
1047 if (!slot)
1048 return -ENOMEM;
1049
1050 slot->hp_slot = 0;
1051 slot->ctrl = ctrl;
1052 slot->bus = ctrl->pci_dev->subordinate->number;
1053 slot->device = ctrl->slot_device_offset + slot->hp_slot;
1054 slot->hpc_ops = ctrl->hpc_ops;
1055 slot->number = ctrl->first_slot;
1056 make_slot_name(slot);
1057 mutex_init(&slot->lock);
1058 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
1059 list_add(&slot->slot_list, &ctrl->slot_list);
1060 return 0;
1061}
1062
1063static void pcie_cleanup_slot(struct controller *ctrl)
1064{
1065 struct slot *slot;
1066 slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
1067 list_del(&slot->slot_list);
1068 cancel_delayed_work(&slot->work);
1069 flush_scheduled_work();
1070 flush_workqueue(pciehp_wq);
1071 kfree(slot);
1142} 1072}
1143 1073
1144static inline void dbg_ctrl(struct controller *ctrl) 1074static inline void dbg_ctrl(struct controller *ctrl)
@@ -1176,15 +1106,23 @@ static inline void dbg_ctrl(struct controller *ctrl)
1176 dbg(" Command Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); 1106 dbg(" Command Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes");
1177 pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1107 pciehp_readw(ctrl, SLOTSTATUS, &reg16);
1178 dbg("Slot Status : 0x%04x\n", reg16); 1108 dbg("Slot Status : 0x%04x\n", reg16);
1179 pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1109 pciehp_readw(ctrl, SLOTCTRL, &reg16);
1180 dbg("Slot Control : 0x%04x\n", reg16); 1110 dbg("Slot Control : 0x%04x\n", reg16);
1181} 1111}
1182 1112
1183int pcie_init(struct controller *ctrl, struct pcie_device *dev) 1113struct controller *pcie_init(struct pcie_device *dev)
1184{ 1114{
1115 struct controller *ctrl;
1185 u32 slot_cap; 1116 u32 slot_cap;
1186 struct pci_dev *pdev = dev->port; 1117 struct pci_dev *pdev = dev->port;
1187 1118
1119 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1120 if (!ctrl) {
1121 err("%s : out of memory\n", __func__);
1122 goto abort;
1123 }
1124 INIT_LIST_HEAD(&ctrl->slot_list);
1125
1188 ctrl->pci_dev = pdev; 1126 ctrl->pci_dev = pdev;
1189 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1127 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1190 if (!ctrl->cap_base) { 1128 if (!ctrl->cap_base) {
@@ -1215,15 +1153,12 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
1215 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) 1153 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
1216 ctrl->no_cmd_complete = 1; 1154 ctrl->no_cmd_complete = 1;
1217 1155
1218 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 1156 /* Clear all remaining event bits in Slot Status register */
1219 pdev->vendor, pdev->device, 1157 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
1220 pdev->subsystem_vendor, pdev->subsystem_device); 1158 goto abort_ctrl;
1221 1159
1222 if (pcie_init_hardware_part1(ctrl, dev)) 1160 /* Disable software notification */
1223 goto abort; 1161 pcie_disable_notification(ctrl);
1224
1225 if (pciehp_request_irq(ctrl))
1226 goto abort;
1227 1162
1228 /* 1163 /*
1229 * If this is the first controller to be initialized, 1164 * If this is the first controller to be initialized,
@@ -1231,18 +1166,39 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
1231 */ 1166 */
1232 if (atomic_add_return(1, &pciehp_num_controllers) == 1) { 1167 if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
1233 pciehp_wq = create_singlethread_workqueue("pciehpd"); 1168 pciehp_wq = create_singlethread_workqueue("pciehpd");
1234 if (!pciehp_wq) { 1169 if (!pciehp_wq)
1235 goto abort_free_irq; 1170 goto abort_ctrl;
1236 }
1237 } 1171 }
1238 1172
1239 if (pcie_init_hardware_part2(ctrl, dev)) 1173 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1240 goto abort_free_irq; 1174 pdev->vendor, pdev->device,
1175 pdev->subsystem_vendor, pdev->subsystem_device);
1176
1177 if (pcie_init_slot(ctrl))
1178 goto abort_ctrl;
1241 1179
1242 return 0; 1180 if (pcie_init_notification(ctrl))
1181 goto abort_slot;
1243 1182
1244abort_free_irq: 1183 return ctrl;
1245 pciehp_free_irq(ctrl); 1184
1185abort_slot:
1186 pcie_cleanup_slot(ctrl);
1187abort_ctrl:
1188 kfree(ctrl);
1246abort: 1189abort:
1247 return -1; 1190 return NULL;
1191}
1192
1193void pcie_release_ctrl(struct controller *ctrl)
1194{
1195 pcie_shutdown_notification(ctrl);
1196 pcie_cleanup_slot(ctrl);
1197 /*
1198 * If this is the last controller to be released, destroy the
1199 * pciehp work queue
1200 */
1201 if (atomic_dec_and_test(&pciehp_num_controllers))
1202 destroy_workqueue(pciehp_wq);
1203 kfree(ctrl);
1248} 1204}
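
After this rework the controller lifetime is handled entirely inside pciehp_hpc.c: pcie_init() allocates the controller, creates its single slot and enables notification, while pcie_release_ctrl() (reached through hpc_ops->release_ctlr) tears all of that down and frees the structure. Sketch of the resulting probe/remove pairing, assuming the pciehp_core.c callers shown earlier:

	/* probe */
	ctrl = pcie_init(dev);
	if (!ctrl)
		return -ENODEV;
	set_service_data(dev, ctrl);

	/* remove */
	ctrl = get_service_data(dev);
	cleanup_slots(ctrl);			/* pci_hp_deregister each slot */
	ctrl->hpc_ops->release_ctlr(ctrl);	/* pcie_release_ctrl(): kfree(ctrl) */
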
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index 779c5db71be4..a796301ea03f 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -14,8 +14,10 @@
14 */ 14 */
15#include <linux/kobject.h> 15#include <linux/kobject.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/pci.h>
17#include <linux/pci_hotplug.h> 18#include <linux/pci_hotplug.h>
18#include "rpadlpar.h" 19#include "rpadlpar.h"
20#include "../pci.h"
19 21
20#define DLPAR_KOBJ_NAME "control" 22#define DLPAR_KOBJ_NAME "control"
21 23
@@ -27,7 +29,6 @@
27 29
28#define MAX_DRC_NAME_LEN 64 30#define MAX_DRC_NAME_LEN 64
29 31
30
31static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, 32static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
32 const char *buf, size_t nbytes) 33 const char *buf, size_t nbytes)
33{ 34{
@@ -112,7 +113,7 @@ int dlpar_sysfs_init(void)
112 int error; 113 int error;
113 114
114 dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME, 115 dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME,
115 &pci_hotplug_slots_kset->kobj); 116 &pci_slots_kset->kobj);
116 if (!dlpar_kobj) 117 if (!dlpar_kobj)
117 return -EINVAL; 118 return -EINVAL;
118 119
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 56197b600d36..9b714ea93d20 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -33,33 +33,6 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include "rpaphp.h" 34#include "rpaphp.h"
35 35
36static ssize_t address_read_file (struct hotplug_slot *php_slot, char *buf)
37{
38 int retval;
39 struct slot *slot = (struct slot *)php_slot->private;
40 struct pci_bus *bus;
41
42 if (!slot)
43 return -ENOENT;
44
45 bus = slot->bus;
46 if (!bus)
47 return -ENOENT;
48
49 if (bus->self)
50 retval = sprintf(buf, pci_name(bus->self));
51 else
52 retval = sprintf(buf, "%04x:%02x:00.0",
53 pci_domain_nr(bus), bus->number);
54
55 return retval;
56}
57
58static struct hotplug_slot_attribute php_attr_address = {
59 .attr = {.name = "address", .mode = S_IFREG | S_IRUGO},
60 .show = address_read_file,
61};
62
63/* free up the memory used by a slot */ 36/* free up the memory used by a slot */
64static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) 37static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
65{ 38{
@@ -135,9 +108,6 @@ int rpaphp_deregister_slot(struct slot *slot)
135 108
136 list_del(&slot->rpaphp_slot_list); 109 list_del(&slot->rpaphp_slot_list);
137 110
138 /* remove "address" file */
139 sysfs_remove_file(&php_slot->kobj, &php_attr_address.attr);
140
141 retval = pci_hp_deregister(php_slot); 111 retval = pci_hp_deregister(php_slot);
142 if (retval) 112 if (retval)
143 err("Problem unregistering a slot %s\n", slot->name); 113 err("Problem unregistering a slot %s\n", slot->name);
@@ -151,6 +121,7 @@ int rpaphp_register_slot(struct slot *slot)
151{ 121{
152 struct hotplug_slot *php_slot = slot->hotplug_slot; 122 struct hotplug_slot *php_slot = slot->hotplug_slot;
153 int retval; 123 int retval;
124 int slotno;
154 125
155 dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", 126 dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
156 __func__, slot->dn->full_name, slot->index, slot->name, 127 __func__, slot->dn->full_name, slot->index, slot->name,
@@ -162,19 +133,16 @@ int rpaphp_register_slot(struct slot *slot)
162 return -EAGAIN; 133 return -EAGAIN;
163 } 134 }
164 135
165 retval = pci_hp_register(php_slot); 136 if (slot->dn->child)
137 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
138 else
139 slotno = -1;
140 retval = pci_hp_register(php_slot, slot->bus, slotno);
166 if (retval) { 141 if (retval) {
167 err("pci_hp_register failed with error %d\n", retval); 142 err("pci_hp_register failed with error %d\n", retval);
168 return retval; 143 return retval;
169 } 144 }
170 145
171 /* create "address" file */
172 retval = sysfs_create_file(&php_slot->kobj, &php_attr_address.attr);
173 if (retval) {
174 err("sysfs_create_file failed with error %d\n", retval);
175 goto sysfs_fail;
176 }
177
178 /* add slot to our internal list */ 146 /* add slot to our internal list */
179 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); 147 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head);
180 info("Slot [%s] registered\n", slot->name); 148 info("Slot [%s] registered\n", slot->name);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2fe37cd85b69..410fe0394a8e 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -197,13 +197,15 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
197static struct hotplug_slot * sn_hp_destroy(void) 197static struct hotplug_slot * sn_hp_destroy(void)
198{ 198{
199 struct slot *slot; 199 struct slot *slot;
200 struct pci_slot *pci_slot;
200 struct hotplug_slot *bss_hotplug_slot = NULL; 201 struct hotplug_slot *bss_hotplug_slot = NULL;
201 202
202 list_for_each_entry(slot, &sn_hp_list, hp_list) { 203 list_for_each_entry(slot, &sn_hp_list, hp_list) {
203 bss_hotplug_slot = slot->hotplug_slot; 204 bss_hotplug_slot = slot->hotplug_slot;
205 pci_slot = bss_hotplug_slot->pci_slot;
204 list_del(&((struct slot *)bss_hotplug_slot->private)-> 206 list_del(&((struct slot *)bss_hotplug_slot->private)->
205 hp_list); 207 hp_list);
206 sysfs_remove_file(&bss_hotplug_slot->kobj, 208 sysfs_remove_file(&pci_slot->kobj,
207 &sn_slot_path_attr.attr); 209 &sn_slot_path_attr.attr);
208 break; 210 break;
209 } 211 }
@@ -614,6 +616,7 @@ static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
614static int sn_hotplug_slot_register(struct pci_bus *pci_bus) 616static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
615{ 617{
616 int device; 618 int device;
619 struct pci_slot *pci_slot;
617 struct hotplug_slot *bss_hotplug_slot; 620 struct hotplug_slot *bss_hotplug_slot;
618 int rc = 0; 621 int rc = 0;
619 622
@@ -650,11 +653,12 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
650 bss_hotplug_slot->ops = &sn_hotplug_slot_ops; 653 bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
651 bss_hotplug_slot->release = &sn_release_slot; 654 bss_hotplug_slot->release = &sn_release_slot;
652 655
653 rc = pci_hp_register(bss_hotplug_slot); 656 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device);
654 if (rc) 657 if (rc)
655 goto register_err; 658 goto register_err;
656 659
657 rc = sysfs_create_file(&bss_hotplug_slot->kobj, 660 pci_slot = bss_hotplug_slot->pci_slot;
661 rc = sysfs_create_file(&pci_slot->kobj,
658 &sn_slot_path_attr.attr); 662 &sn_slot_path_attr.attr);
659 if (rc) 663 if (rc)
660 goto register_err; 664 goto register_err;
@@ -664,7 +668,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
664 668
665register_err: 669register_err:
666 dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n", 670 dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
667 rc); 671 rc);
668 672
669alloc_err: 673alloc_err:
670 if (rc == -ENOMEM) 674 if (rc == -ENOMEM)
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index f66e8d6315ab..8a026f750deb 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -170,6 +170,7 @@ extern void shpchp_queue_pushbutton_work(struct work_struct *work);
170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); 170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
171 171
172#ifdef CONFIG_ACPI 172#ifdef CONFIG_ACPI
173#include <linux/pci-acpi.h>
173static inline int get_hp_params_from_firmware(struct pci_dev *dev, 174static inline int get_hp_params_from_firmware(struct pci_dev *dev,
174 struct hotplug_params *hpp) 175 struct hotplug_params *hpp)
175{ 176{
@@ -177,14 +178,15 @@ static inline int get_hp_params_from_firmware(struct pci_dev *dev,
177 return -ENODEV; 178 return -ENODEV;
178 return 0; 179 return 0;
179} 180}
180#define get_hp_hw_control_from_firmware(pdev) \ 181
181 do { \ 182static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
182 if (DEVICE_ACPI_HANDLE(&(pdev->dev))) \ 183{
183 acpi_run_oshp(DEVICE_ACPI_HANDLE(&(pdev->dev)));\ 184 u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
184 } while (0) 185 return acpi_get_hp_hw_control_from_firmware(dev, flags);
186}
185#else 187#else
186#define get_hp_params_from_firmware(dev, hpp) (-ENODEV) 188#define get_hp_params_from_firmware(dev, hpp) (-ENODEV)
187#define get_hp_hw_control_from_firmware(dev) do { } while (0) 189#define get_hp_hw_control_from_firmware(dev) (0)
188#endif 190#endif
189 191
190struct ctrl_reg { 192struct ctrl_reg {
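
As with pciehp, the SHPC firmware handshake becomes a function that reports whether control was granted, and the !CONFIG_ACPI stub evaluates to 0 so callers need no #ifdefs. Sketch of the caller-side test this enables (mirrors the is_shpc_capable() change in shpchp_core.c below):

	if (get_hp_hw_control_from_firmware(pdev))
		return -ENODEV;	/* firmware refused SHPC control */
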
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 97848654652a..a8cbd039b85b 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,7 +39,7 @@
39int shpchp_debug; 39int shpchp_debug;
40int shpchp_poll_mode; 40int shpchp_poll_mode;
41int shpchp_poll_time; 41int shpchp_poll_time;
42int shpchp_slot_with_bus; 42static int shpchp_slot_with_bus;
43struct workqueue_struct *shpchp_wq; 43struct workqueue_struct *shpchp_wq;
44 44
45#define DRIVER_VERSION "0.4" 45#define DRIVER_VERSION "0.4"
@@ -68,7 +68,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
68static int get_attention_status (struct hotplug_slot *slot, u8 *value); 68static int get_attention_status (struct hotplug_slot *slot, u8 *value);
69static int get_latch_status (struct hotplug_slot *slot, u8 *value); 69static int get_latch_status (struct hotplug_slot *slot, u8 *value);
70static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 70static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
71static int get_address (struct hotplug_slot *slot, u32 *value);
72static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 71static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 72static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
74 73
@@ -81,7 +80,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
81 .get_attention_status = get_attention_status, 80 .get_attention_status = get_attention_status,
82 .get_latch_status = get_latch_status, 81 .get_latch_status = get_latch_status,
83 .get_adapter_status = get_adapter_status, 82 .get_adapter_status = get_adapter_status,
84 .get_address = get_address,
85 .get_max_bus_speed = get_max_bus_speed, 83 .get_max_bus_speed = get_max_bus_speed,
86 .get_cur_bus_speed = get_cur_bus_speed, 84 .get_cur_bus_speed = get_cur_bus_speed,
87}; 85};
@@ -159,7 +157,8 @@ static int init_slots(struct controller *ctrl)
159 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 157 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
160 "slot_device_offset=%x\n", slot->bus, slot->device, 158 "slot_device_offset=%x\n", slot->bus, slot->device,
161 slot->hp_slot, slot->number, ctrl->slot_device_offset); 159 slot->hp_slot, slot->number, ctrl->slot_device_offset);
162 retval = pci_hp_register(slot->hotplug_slot); 160 retval = pci_hp_register(slot->hotplug_slot,
161 ctrl->pci_dev->subordinate, slot->device);
163 if (retval) { 162 if (retval) {
164 err("pci_hp_register failed with error %d\n", retval); 163 err("pci_hp_register failed with error %d\n", retval);
165 if (retval == -EEXIST) 164 if (retval == -EEXIST)
@@ -288,19 +287,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
288 return 0; 287 return 0;
289} 288}
290 289
291static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) 290static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
292{ 291 enum pci_bus_speed *value)
293 struct slot *slot = get_slot(hotplug_slot);
294 struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
295
296 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
297
298 *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
299
300 return 0;
301}
302
303static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
304{ 292{
305 struct slot *slot = get_slot(hotplug_slot); 293 struct slot *slot = get_slot(hotplug_slot);
306 int retval; 294 int retval;
@@ -330,13 +318,14 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
330 318
331static int is_shpc_capable(struct pci_dev *dev) 319static int is_shpc_capable(struct pci_dev *dev)
332{ 320{
333 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == 321 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
334 PCI_DEVICE_ID_AMD_GOLAM_7450)) 322 PCI_DEVICE_ID_AMD_GOLAM_7450))
335 return 1; 323 return 1;
336 if (pci_find_capability(dev, PCI_CAP_ID_SHPC)) 324 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
337 return 1; 325 return 0;
338 326 if (get_hp_hw_control_from_firmware(dev))
339 return 0; 327 return 0;
328 return 1;
340} 329}
341 330
342static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 331static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7d770b2cd889..7a0bff364cd4 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -1084,7 +1084,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1084 dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, 1084 dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__,
1085 pdev->bus->number, PCI_SLOT(pdev->devfn), 1085 pdev->bus->number, PCI_SLOT(pdev->devfn),
1086 PCI_FUNC(pdev->devfn), pdev->irq); 1086 PCI_FUNC(pdev->devfn), pdev->irq);
1087 get_hp_hw_control_from_firmware(pdev);
1088 1087
1089 /* 1088 /*
1090 * If this is the first controller to be initialized, 1089 * If this is the first controller to be initialized,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index bb0642318a95..3f7b81c065d2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1748,7 +1748,6 @@ int __init init_dmars(void)
1748 deferred_flush = kzalloc(g_num_of_iommus * 1748 deferred_flush = kzalloc(g_num_of_iommus *
1749 sizeof(struct deferred_flush_tables), GFP_KERNEL); 1749 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1750 if (!deferred_flush) { 1750 if (!deferred_flush) {
1751 kfree(g_iommus);
1752 ret = -ENOMEM; 1751 ret = -ENOMEM;
1753 goto error; 1752 goto error;
1754 } 1753 }
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 8c61304cbb37..15af618d36e2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -70,12 +70,10 @@ arch_teardown_msi_irqs(struct pci_dev *dev)
70 } 70 }
71} 71}
72 72
73static void msi_set_enable(struct pci_dev *dev, int enable) 73static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
74{ 74{
75 int pos;
76 u16 control; 75 u16 control;
77 76
78 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
79 if (pos) { 77 if (pos) {
80 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 78 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
81 control &= ~PCI_MSI_FLAGS_ENABLE; 79 control &= ~PCI_MSI_FLAGS_ENABLE;
@@ -85,6 +83,11 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
85 } 83 }
86} 84}
87 85
86static void msi_set_enable(struct pci_dev *dev, int enable)
87{
88 __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
89}
90
88static void msix_set_enable(struct pci_dev *dev, int enable) 91static void msix_set_enable(struct pci_dev *dev, int enable)
89{ 92{
90 int pos; 93 int pos;
@@ -141,7 +144,8 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
141 mask_bits |= flag & mask; 144 mask_bits |= flag & mask;
142 pci_write_config_dword(entry->dev, pos, mask_bits); 145 pci_write_config_dword(entry->dev, pos, mask_bits);
143 } else { 146 } else {
144 msi_set_enable(entry->dev, !flag); 147 __msi_set_enable(entry->dev, entry->msi_attrib.pos,
148 !flag);
145 } 149 }
146 break; 150 break;
147 case PCI_CAP_ID_MSIX: 151 case PCI_CAP_ID_MSIX:
@@ -561,9 +565,8 @@ int pci_enable_msi(struct pci_dev* dev)
561 565
562 /* Check whether driver already requested for MSI-X irqs */ 566 /* Check whether driver already requested for MSI-X irqs */
563 if (dev->msix_enabled) { 567 if (dev->msix_enabled) {
564 printk(KERN_INFO "PCI: %s: Can't enable MSI. " 568 dev_info(&dev->dev, "can't enable MSI "
565 "Device already has MSI-X enabled\n", 569 "(MSI-X already enabled)\n");
566 pci_name(dev));
567 return -EINVAL; 570 return -EINVAL;
568 } 571 }
569 status = msi_capability_init(dev); 572 status = msi_capability_init(dev);
@@ -686,9 +689,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
686 689
687 /* Check whether driver already requested for MSI irq */ 690 /* Check whether driver already requested for MSI irq */
688 if (dev->msi_enabled) { 691 if (dev->msi_enabled) {
689 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " 692 dev_info(&dev->dev, "can't enable MSI-X "
690 "Device already has an MSI irq assigned\n", 693 "(MSI IRQ already assigned)\n");
691 pci_name(dev));
692 return -EINVAL; 694 return -EINVAL;
693 } 695 }
694 status = msix_capability_init(dev, entries, nvec); 696 status = msix_capability_init(dev, entries, nvec);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dab9d471914c..7764768b6a0e 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -21,12 +21,19 @@
21 21
22struct acpi_osc_data { 22struct acpi_osc_data {
23 acpi_handle handle; 23 acpi_handle handle;
24 u32 ctrlset_buf[3]; 24 u32 support_set;
25 u32 global_ctrlsets; 25 u32 control_set;
26 int is_queried;
27 u32 query_result;
26 struct list_head sibiling; 28 struct list_head sibiling;
27}; 29};
28static LIST_HEAD(acpi_osc_data_list); 30static LIST_HEAD(acpi_osc_data_list);
29 31
32struct acpi_osc_args {
33 u32 capbuf[3];
34 u32 query_result;
35};
36
30static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) 37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
31{ 38{
32 struct acpi_osc_data *data; 39 struct acpi_osc_data *data;
@@ -44,42 +51,18 @@ static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
44 return data; 51 return data;
45} 52}
46 53
47static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; 54static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
55 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
48 56
49static acpi_status 57static acpi_status acpi_run_osc(acpi_handle handle,
50acpi_query_osc ( 58 struct acpi_osc_args *osc_args)
51 acpi_handle handle,
52 u32 level,
53 void *context,
54 void **retval )
55{ 59{
56 acpi_status status; 60 acpi_status status;
57 struct acpi_object_list input; 61 struct acpi_object_list input;
58 union acpi_object in_params[4]; 62 union acpi_object in_params[4];
59 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 63 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
60 union acpi_object *out_obj; 64 union acpi_object *out_obj;
61 u32 osc_dw0; 65 u32 osc_dw0, flags = osc_args->capbuf[OSC_QUERY_TYPE];
62 acpi_status *ret_status = (acpi_status *)retval;
63 struct acpi_osc_data *osc_data;
64 u32 flags = (unsigned long)context, temp;
65 acpi_handle tmp;
66
67 status = acpi_get_handle(handle, "_OSC", &tmp);
68 if (ACPI_FAILURE(status))
69 return status;
70
71 osc_data = acpi_get_osc_data(handle);
72 if (!osc_data) {
73 printk(KERN_ERR "acpi osc data array is full\n");
74 return AE_ERROR;
75 }
76
77 osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
78
79 /* do _OSC query for all possible controls */
80 temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE];
81 osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
82 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
83 66
84 /* Setting up input parameters */ 67 /* Setting up input parameters */
85 input.count = 4; 68 input.count = 4;
@@ -93,20 +76,19 @@ acpi_query_osc (
93 in_params[2].integer.value = 3; 76 in_params[2].integer.value = 3;
94 in_params[3].type = ACPI_TYPE_BUFFER; 77 in_params[3].type = ACPI_TYPE_BUFFER;
95 in_params[3].buffer.length = 12; 78 in_params[3].buffer.length = 12;
96 in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf; 79 in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
97 80
98 status = acpi_evaluate_object(handle, "_OSC", &input, &output); 81 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
99 if (ACPI_FAILURE(status)) 82 if (ACPI_FAILURE(status))
100 goto out_nofree; 83 return status;
101 out_obj = output.pointer;
102 84
85 out_obj = output.pointer;
103 if (out_obj->type != ACPI_TYPE_BUFFER) { 86 if (out_obj->type != ACPI_TYPE_BUFFER) {
104 printk(KERN_DEBUG 87 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
105 "Evaluate _OSC returns wrong type\n");
106 status = AE_TYPE; 88 status = AE_TYPE;
107 goto query_osc_out; 89 goto out_kfree;
108 } 90 }
109 osc_dw0 = *((u32 *) out_obj->buffer.pointer); 91 osc_dw0 = *((u32 *)out_obj->buffer.pointer);
110 if (osc_dw0) { 92 if (osc_dw0) {
111 if (osc_dw0 & OSC_REQUEST_ERROR) 93 if (osc_dw0 & OSC_REQUEST_ERROR)
112 printk(KERN_DEBUG "_OSC request fails\n"); 94 printk(KERN_DEBUG "_OSC request fails\n");
@@ -115,93 +97,58 @@ acpi_query_osc (
115 if (osc_dw0 & OSC_INVALID_REVISION_ERROR) 97 if (osc_dw0 & OSC_INVALID_REVISION_ERROR)
116 printk(KERN_DEBUG "_OSC invalid revision\n"); 98 printk(KERN_DEBUG "_OSC invalid revision\n");
117 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 99 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
118 /* Update Global Control Set */ 100 if (flags & OSC_QUERY_ENABLE)
119 osc_data->global_ctrlsets = 101 goto out_success;
120 *((u32 *)(out_obj->buffer.pointer + 8)); 102 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
121 status = AE_OK; 103 status = AE_SUPPORT;
122 goto query_osc_out; 104 goto out_kfree;
123 } 105 }
124 status = AE_ERROR; 106 status = AE_ERROR;
125 goto query_osc_out; 107 goto out_kfree;
126 } 108 }
127 109out_success:
128 /* Update Global Control Set */ 110 if (flags & OSC_QUERY_ENABLE)
129 osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); 111 osc_args->query_result =
112 *((u32 *)(out_obj->buffer.pointer + 8));
130 status = AE_OK; 113 status = AE_OK;
131 114
132query_osc_out: 115out_kfree:
133 kfree(output.pointer); 116 kfree(output.pointer);
134out_nofree:
135 *ret_status = status;
136
137 osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
138 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp;
139 if (ACPI_FAILURE(status)) {
140 /* no osc support at all */
141 osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
142 }
143
144 return status; 117 return status;
145} 118}
146 119
147 120static acpi_status acpi_query_osc(acpi_handle handle,
148static acpi_status 121 u32 level, void *context, void **retval)
149acpi_run_osc (
150 acpi_handle handle,
151 void *context)
152{ 122{
153 acpi_status status; 123 acpi_status status;
154 struct acpi_object_list input; 124 struct acpi_osc_data *osc_data;
155 union acpi_object in_params[4]; 125 u32 flags = (unsigned long)context, support_set;
156 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 126 acpi_handle tmp;
157 union acpi_object *out_obj; 127 struct acpi_osc_args osc_args;
158 u32 osc_dw0;
159
160 /* Setting up input parameters */
161 input.count = 4;
162 input.pointer = in_params;
163 in_params[0].type = ACPI_TYPE_BUFFER;
164 in_params[0].buffer.length = 16;
165 in_params[0].buffer.pointer = OSC_UUID;
166 in_params[1].type = ACPI_TYPE_INTEGER;
167 in_params[1].integer.value = 1;
168 in_params[2].type = ACPI_TYPE_INTEGER;
169 in_params[2].integer.value = 3;
170 in_params[3].type = ACPI_TYPE_BUFFER;
171 in_params[3].buffer.length = 12;
172 in_params[3].buffer.pointer = (u8 *)context;
173 128
174 status = acpi_evaluate_object(handle, "_OSC", &input, &output); 129 status = acpi_get_handle(handle, "_OSC", &tmp);
175 if (ACPI_FAILURE (status)) 130 if (ACPI_FAILURE(status))
176 return status; 131 return status;
177 132
178 out_obj = output.pointer; 133 osc_data = acpi_get_osc_data(handle);
179 if (out_obj->type != ACPI_TYPE_BUFFER) { 134 if (!osc_data) {
180 printk(KERN_DEBUG 135 printk(KERN_ERR "acpi osc data array is full\n");
181 "Evaluate _OSC returns wrong type\n"); 136 return AE_ERROR;
182 status = AE_TYPE;
183 goto run_osc_out;
184 } 137 }
185 osc_dw0 = *((u32 *) out_obj->buffer.pointer); 138
186 if (osc_dw0) { 139 /* do _OSC query for all possible controls */
187 if (osc_dw0 & OSC_REQUEST_ERROR) 140 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
188 printk(KERN_DEBUG "_OSC request fails\n"); 141 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
189 if (osc_dw0 & OSC_INVALID_UUID_ERROR) 142 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
190 printk(KERN_DEBUG "_OSC invalid UUID\n"); 143 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
191 if (osc_dw0 & OSC_INVALID_REVISION_ERROR) 144
192 printk(KERN_DEBUG "_OSC invalid revision\n"); 145 status = acpi_run_osc(handle, &osc_args);
193 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 146 if (ACPI_SUCCESS(status)) {
194 printk(KERN_DEBUG "_OSC FW not grant req. control\n"); 147 osc_data->support_set = support_set;
195 status = AE_SUPPORT; 148 osc_data->query_result = osc_args.query_result;
196 goto run_osc_out; 149 osc_data->is_queried = 1;
197 }
198 status = AE_ERROR;
199 goto run_osc_out;
200 } 150 }
201 status = AE_OK;
202 151
203run_osc_out:
204 kfree(output.pointer);
205 return status; 152 return status;
206} 153}
207 154
@@ -215,15 +162,11 @@ run_osc_out:
215 **/ 162 **/
216acpi_status __pci_osc_support_set(u32 flags, const char *hid) 163acpi_status __pci_osc_support_set(u32 flags, const char *hid)
217{ 164{
218 acpi_status retval = AE_NOT_FOUND; 165 if (!(flags & OSC_SUPPORT_MASKS))
219
220 if (!(flags & OSC_SUPPORT_MASKS)) {
221 return AE_TYPE; 166 return AE_TYPE;
222 } 167
223 acpi_get_devices(hid, 168 acpi_get_devices(hid, acpi_query_osc,
224 acpi_query_osc, 169 (void *)(unsigned long)flags, NULL);
225 (void *)(unsigned long)flags,
226 (void **) &retval );
227 return AE_OK; 170 return AE_OK;
228} 171}
229 172
@@ -236,10 +179,11 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
236 **/ 179 **/
237acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 180acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
238{ 181{
239 acpi_status status; 182 acpi_status status;
240 u32 ctrlset; 183 u32 ctrlset, control_set;
241 acpi_handle tmp; 184 acpi_handle tmp;
242 struct acpi_osc_data *osc_data; 185 struct acpi_osc_data *osc_data;
186 struct acpi_osc_args osc_args;
243 187
244 status = acpi_get_handle(handle, "_OSC", &tmp); 188 status = acpi_get_handle(handle, "_OSC", &tmp);
245 if (ACPI_FAILURE(status)) 189 if (ACPI_FAILURE(status))
@@ -252,24 +196,25 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
252 } 196 }
253 197
254 ctrlset = (flags & OSC_CONTROL_MASKS); 198 ctrlset = (flags & OSC_CONTROL_MASKS);
255 if (!ctrlset) { 199 if (!ctrlset)
256 return AE_TYPE; 200 return AE_TYPE;
257 } 201
258 if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] && 202 if (osc_data->is_queried &&
259 ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) { 203 ((osc_data->query_result & ctrlset) != ctrlset))
260 return AE_SUPPORT; 204 return AE_SUPPORT;
261 } 205
262 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; 206 control_set = osc_data->control_set | ctrlset;
263 status = acpi_run_osc(handle, osc_data->ctrlset_buf); 207 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
264 if (ACPI_FAILURE (status)) { 208 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
265 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; 209 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
266 } 210 status = acpi_run_osc(handle, &osc_args);
267 211 if (ACPI_SUCCESS(status))
212 osc_data->control_set = control_set;
213
268 return status; 214 return status;
269} 215}
270EXPORT_SYMBOL(pci_osc_control_set); 216EXPORT_SYMBOL(pci_osc_control_set);
271 217
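The two exported helpers above keep the query and control phases of _OSC separate: __pci_osc_support_set() runs the query on every root bridge matching the given HID and caches the result, while pci_osc_control_set() requests control bits and returns AE_SUPPORT when the cached query shows the firmware will not grant them. A minimal caller-side sketch follows; it assumes the pcie_osc_support_set() wrapper and the OSC_* flag macros from <linux/pci-acpi.h>, and the function name is invented.

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>

/* Hypothetical caller: negotiate native PCIe hotplug control for one bridge. */
static int example_claim_native_hotplug(acpi_handle root_bridge_handle)
{
	acpi_status status;

	/* Query phase: advertise OS capabilities on all PCIe root bridges. */
	pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT |
			     OSC_ACTIVE_STATE_PWR_SUPPORT);

	/* Control phase: ask for native hotplug on this particular bridge. */
	status = pci_osc_control_set(root_bridge_handle,
				     OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);

	return ACPI_FAILURE(status) ? -ENODEV : 0;
}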
272#ifdef CONFIG_ACPI_SLEEP
273/* 218/*
274 * _SxD returns the D-state with the highest power 219 * _SxD returns the D-state with the highest power
275 * (lowest D-state number) supported in the S-state "x". 220 * (lowest D-state number) supported in the S-state "x".
@@ -313,7 +258,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
313 } 258 }
314 return PCI_POWER_ERROR; 259 return PCI_POWER_ERROR;
315} 260}
316#endif 261
262static bool acpi_pci_power_manageable(struct pci_dev *dev)
263{
264 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
265
266 return handle ? acpi_bus_power_manageable(handle) : false;
267}
317 268
318static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) 269static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
319{ 270{
@@ -326,12 +277,11 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
326 [PCI_D3hot] = ACPI_STATE_D3, 277 [PCI_D3hot] = ACPI_STATE_D3,
327 [PCI_D3cold] = ACPI_STATE_D3 278 [PCI_D3cold] = ACPI_STATE_D3
328 }; 279 };
280 int error = -EINVAL;
329 281
330 if (!handle)
331 return -ENODEV;
332 /* If the ACPI device has _EJ0, ignore the device */ 282 /* If the ACPI device has _EJ0, ignore the device */
333 if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) 283 if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
334 return 0; 284 return -ENODEV;
335 285
336 switch (state) { 286 switch (state) {
337 case PCI_D0: 287 case PCI_D0:
@@ -339,11 +289,41 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
339 case PCI_D2: 289 case PCI_D2:
340 case PCI_D3hot: 290 case PCI_D3hot:
341 case PCI_D3cold: 291 case PCI_D3cold:
342 return acpi_bus_set_power(handle, state_conv[state]); 292 error = acpi_bus_set_power(handle, state_conv[state]);
343 } 293 }
344 return -EINVAL; 294
295 if (!error)
296 dev_printk(KERN_INFO, &dev->dev,
297 "power state changed by ACPI to D%d\n", state);
298
299 return error;
300}
301
302static bool acpi_pci_can_wakeup(struct pci_dev *dev)
303{
304 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
305
306 return handle ? acpi_bus_can_wakeup(handle) : false;
307}
308
309static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
310{
311 int error = acpi_pm_device_sleep_wake(&dev->dev, enable);
312
313 if (!error)
314 dev_printk(KERN_INFO, &dev->dev,
315 "wake-up capability %s by ACPI\n",
316 enable ? "enabled" : "disabled");
317 return error;
345} 318}
346 319
320static struct pci_platform_pm_ops acpi_pci_platform_pm = {
321 .is_manageable = acpi_pci_power_manageable,
322 .set_state = acpi_pci_set_power_state,
323 .choose_state = acpi_pci_choose_state,
324 .can_wakeup = acpi_pci_can_wakeup,
325 .sleep_wake = acpi_pci_sleep_wake,
326};
347 327
348/* ACPI bus type */ 328/* ACPI bus type */
349static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) 329static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
@@ -395,10 +375,7 @@ static int __init acpi_pci_init(void)
395 ret = register_acpi_bus_type(&acpi_pci_bus); 375 ret = register_acpi_bus_type(&acpi_pci_bus);
396 if (ret) 376 if (ret)
397 return 0; 377 return 0;
398#ifdef CONFIG_ACPI_SLEEP 378 pci_set_platform_pm(&acpi_pci_platform_pm);
399 platform_pci_choose_state = acpi_pci_choose_state;
400#endif
401 platform_pci_set_power_state = acpi_pci_set_power_state;
402 return 0; 379 return 0;
403} 380}
404arch_initcall(acpi_pci_init); 381arch_initcall(acpi_pci_init);
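acpi_pci_init() now hands the PCI core a complete table of callbacks through pci_set_platform_pm() instead of patching two separate function pointers. Any other firmware layer could register the same way; the sketch below is hypothetical, every myfw_* name is invented, and because pci_set_platform_pm() and struct pci_platform_pm_ops are declared in the private drivers/pci/pci.h header, such a backend would have to be built inside drivers/pci.

#include <linux/init.h>
#include <linux/pci.h>
#include "pci.h"	/* private drivers/pci header, declares the ops */

static bool myfw_pci_power_manageable(struct pci_dev *dev)
{
	return false;			/* report per-device capability here */
}

static int myfw_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	return -ENODEV;			/* ask the firmware to change the state */
}

static pci_power_t myfw_pci_choose_state(struct pci_dev *dev)
{
	return PCI_POWER_ERROR;		/* no preference */
}

static bool myfw_pci_can_wakeup(struct pci_dev *dev)
{
	return false;
}

static int myfw_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return -ENODEV;
}

static struct pci_platform_pm_ops myfw_pci_platform_pm = {
	.is_manageable	= myfw_pci_power_manageable,
	.set_state	= myfw_pci_set_power_state,
	.choose_state	= myfw_pci_choose_state,
	.can_wakeup	= myfw_pci_can_wakeup,
	.sleep_wake	= myfw_pci_sleep_wake,
};

static int __init myfw_pci_init(void)
{
	/* pci_set_platform_pm() rejects a table with any missing callback. */
	return pci_set_platform_pm(&myfw_pci_platform_pm);
}
arch_initcall(myfw_pci_init);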
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e1637bd82b8e..a13f53486114 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -274,7 +274,57 @@ static int pci_device_remove(struct device * dev)
274 return 0; 274 return 0;
275} 275}
276 276
277static int pci_device_suspend(struct device * dev, pm_message_t state) 277static void pci_device_shutdown(struct device *dev)
278{
279 struct pci_dev *pci_dev = to_pci_dev(dev);
280 struct pci_driver *drv = pci_dev->driver;
281
282 if (drv && drv->shutdown)
283 drv->shutdown(pci_dev);
284 pci_msi_shutdown(pci_dev);
285 pci_msix_shutdown(pci_dev);
286}
287
288#ifdef CONFIG_PM_SLEEP
289
290/*
291 * Default "suspend" method for devices that have no driver provided suspend,
292 * or not even a driver at all.
293 */
294static void pci_default_pm_suspend(struct pci_dev *pci_dev)
295{
296 pci_save_state(pci_dev);
297 /*
298 * mark its power state as "unknown", since we don't know if
299 * e.g. the BIOS will change its device state when we suspend.
300 */
301 if (pci_dev->current_state == PCI_D0)
302 pci_dev->current_state = PCI_UNKNOWN;
303}
304
305/*
306 * Default "resume" method for devices that have no driver provided resume,
307 * or not even a driver at all.
308 */
309static int pci_default_pm_resume(struct pci_dev *pci_dev)
310{
311 int retval = 0;
312
313 /* restore the PCI config space */
314 pci_restore_state(pci_dev);
315 /* if the device was enabled before suspend, reenable */
316 retval = pci_reenable_device(pci_dev);
317 /*
318 * if the device was busmaster before the suspend, make it busmaster
319 * again
320 */
321 if (pci_dev->is_busmaster)
322 pci_set_master(pci_dev);
323
324 return retval;
325}
326
327static int pci_legacy_suspend(struct device *dev, pm_message_t state)
278{ 328{
279 struct pci_dev * pci_dev = to_pci_dev(dev); 329 struct pci_dev * pci_dev = to_pci_dev(dev);
280 struct pci_driver * drv = pci_dev->driver; 330 struct pci_driver * drv = pci_dev->driver;
@@ -284,18 +334,12 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
284 i = drv->suspend(pci_dev, state); 334 i = drv->suspend(pci_dev, state);
285 suspend_report_result(drv->suspend, i); 335 suspend_report_result(drv->suspend, i);
286 } else { 336 } else {
287 pci_save_state(pci_dev); 337 pci_default_pm_suspend(pci_dev);
288 /*
289 * mark its power state as "unknown", since we don't know if
290 * e.g. the BIOS will change its device state when we suspend.
291 */
292 if (pci_dev->current_state == PCI_D0)
293 pci_dev->current_state = PCI_UNKNOWN;
294 } 338 }
295 return i; 339 return i;
296} 340}
297 341
298static int pci_device_suspend_late(struct device * dev, pm_message_t state) 342static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
299{ 343{
300 struct pci_dev * pci_dev = to_pci_dev(dev); 344 struct pci_dev * pci_dev = to_pci_dev(dev);
301 struct pci_driver * drv = pci_dev->driver; 345 struct pci_driver * drv = pci_dev->driver;
@@ -308,26 +352,7 @@ static int pci_device_suspend_late(struct device * dev, pm_message_t state)
308 return i; 352 return i;
309} 353}
310 354
311/* 355static int pci_legacy_resume(struct device *dev)
312 * Default resume method for devices that have no driver provided resume,
313 * or not even a driver at all.
314 */
315static int pci_default_resume(struct pci_dev *pci_dev)
316{
317 int retval = 0;
318
319 /* restore the PCI config space */
320 pci_restore_state(pci_dev);
321 /* if the device was enabled before suspend, reenable */
322 retval = pci_reenable_device(pci_dev);
323 /* if the device was busmaster before the suspend, make it busmaster again */
324 if (pci_dev->is_busmaster)
325 pci_set_master(pci_dev);
326
327 return retval;
328}
329
330static int pci_device_resume(struct device * dev)
331{ 356{
332 int error; 357 int error;
333 struct pci_dev * pci_dev = to_pci_dev(dev); 358 struct pci_dev * pci_dev = to_pci_dev(dev);
@@ -336,34 +361,313 @@ static int pci_device_resume(struct device * dev)
336 if (drv && drv->resume) 361 if (drv && drv->resume)
337 error = drv->resume(pci_dev); 362 error = drv->resume(pci_dev);
338 else 363 else
339 error = pci_default_resume(pci_dev); 364 error = pci_default_pm_resume(pci_dev);
340 return error; 365 return error;
341} 366}
342 367
343static int pci_device_resume_early(struct device * dev) 368static int pci_legacy_resume_early(struct device *dev)
344{ 369{
345 int error = 0; 370 int error = 0;
346 struct pci_dev * pci_dev = to_pci_dev(dev); 371 struct pci_dev * pci_dev = to_pci_dev(dev);
347 struct pci_driver * drv = pci_dev->driver; 372 struct pci_driver * drv = pci_dev->driver;
348 373
349 pci_fixup_device(pci_fixup_resume, pci_dev);
350
351 if (drv && drv->resume_early) 374 if (drv && drv->resume_early)
352 error = drv->resume_early(pci_dev); 375 error = drv->resume_early(pci_dev);
353 return error; 376 return error;
354} 377}
355 378
356static void pci_device_shutdown(struct device *dev) 379static int pci_pm_prepare(struct device *dev)
380{
381 struct device_driver *drv = dev->driver;
382 int error = 0;
383
384 if (drv && drv->pm && drv->pm->prepare)
385 error = drv->pm->prepare(dev);
386
387 return error;
388}
389
390static void pci_pm_complete(struct device *dev)
391{
392 struct device_driver *drv = dev->driver;
393
394 if (drv && drv->pm && drv->pm->complete)
395 drv->pm->complete(dev);
396}
397
398#ifdef CONFIG_SUSPEND
399
400static int pci_pm_suspend(struct device *dev)
401{
402 struct pci_dev *pci_dev = to_pci_dev(dev);
403 struct device_driver *drv = dev->driver;
404 int error = 0;
405
406 if (drv && drv->pm) {
407 if (drv->pm->suspend) {
408 error = drv->pm->suspend(dev);
409 suspend_report_result(drv->pm->suspend, error);
410 } else {
411 pci_default_pm_suspend(pci_dev);
412 }
413 } else {
414 error = pci_legacy_suspend(dev, PMSG_SUSPEND);
415 }
416 pci_fixup_device(pci_fixup_suspend, pci_dev);
417
418 return error;
419}
420
421static int pci_pm_suspend_noirq(struct device *dev)
357{ 422{
358 struct pci_dev *pci_dev = to_pci_dev(dev); 423 struct pci_dev *pci_dev = to_pci_dev(dev);
359 struct pci_driver *drv = pci_dev->driver; 424 struct pci_driver *drv = pci_dev->driver;
425 int error = 0;
360 426
361 if (drv && drv->shutdown) 427 if (drv && drv->pm) {
362 drv->shutdown(pci_dev); 428 if (drv->pm->suspend_noirq) {
363 pci_msi_shutdown(pci_dev); 429 error = drv->pm->suspend_noirq(dev);
364 pci_msix_shutdown(pci_dev); 430 suspend_report_result(drv->pm->suspend_noirq, error);
431 }
432 } else {
433 error = pci_legacy_suspend_late(dev, PMSG_SUSPEND);
434 }
435
436 return error;
365} 437}
366 438
439static int pci_pm_resume(struct device *dev)
440{
441 struct pci_dev *pci_dev = to_pci_dev(dev);
442 struct device_driver *drv = dev->driver;
443 int error;
444
445 pci_fixup_device(pci_fixup_resume, pci_dev);
446
447 if (drv && drv->pm) {
448 error = drv->pm->resume ? drv->pm->resume(dev) :
449 pci_default_pm_resume(pci_dev);
450 } else {
451 error = pci_legacy_resume(dev);
452 }
453
454 return error;
455}
456
457static int pci_pm_resume_noirq(struct device *dev)
458{
459 struct pci_dev *pci_dev = to_pci_dev(dev);
460 struct pci_driver *drv = pci_dev->driver;
461 int error = 0;
462
463 pci_fixup_device(pci_fixup_resume_early, pci_dev);
464
465 if (drv && drv->pm) {
466 if (drv->pm->resume_noirq)
467 error = drv->pm->resume_noirq(dev);
468 } else {
469 error = pci_legacy_resume_early(dev);
470 }
471
472 return error;
473}
474
475#else /* !CONFIG_SUSPEND */
476
477#define pci_pm_suspend NULL
478#define pci_pm_suspend_noirq NULL
479#define pci_pm_resume NULL
480#define pci_pm_resume_noirq NULL
481
482#endif /* !CONFIG_SUSPEND */
483
484#ifdef CONFIG_HIBERNATION
485
486static int pci_pm_freeze(struct device *dev)
487{
488 struct pci_dev *pci_dev = to_pci_dev(dev);
489 struct device_driver *drv = dev->driver;
490 int error = 0;
491
492 if (drv && drv->pm) {
493 if (drv->pm->freeze) {
494 error = drv->pm->freeze(dev);
495 suspend_report_result(drv->pm->freeze, error);
496 } else {
497 pci_default_pm_suspend(pci_dev);
498 }
499 } else {
500 error = pci_legacy_suspend(dev, PMSG_FREEZE);
501 pci_fixup_device(pci_fixup_suspend, pci_dev);
502 }
503
504 return error;
505}
506
507static int pci_pm_freeze_noirq(struct device *dev)
508{
509 struct pci_dev *pci_dev = to_pci_dev(dev);
510 struct pci_driver *drv = pci_dev->driver;
511 int error = 0;
512
513 if (drv && drv->pm) {
514 if (drv->pm->freeze_noirq) {
515 error = drv->pm->freeze_noirq(dev);
516 suspend_report_result(drv->pm->freeze_noirq, error);
517 }
518 } else {
519 error = pci_legacy_suspend_late(dev, PMSG_FREEZE);
520 }
521
522 return error;
523}
524
525static int pci_pm_thaw(struct device *dev)
526{
527 struct device_driver *drv = dev->driver;
528 int error = 0;
529
530 if (drv && drv->pm) {
531 if (drv->pm->thaw)
532 error = drv->pm->thaw(dev);
533 } else {
534 pci_fixup_device(pci_fixup_resume, to_pci_dev(dev));
535 error = pci_legacy_resume(dev);
536 }
537
538 return error;
539}
540
541static int pci_pm_thaw_noirq(struct device *dev)
542{
543 struct pci_dev *pci_dev = to_pci_dev(dev);
544 struct pci_driver *drv = pci_dev->driver;
545 int error = 0;
546
547 if (drv && drv->pm) {
548 if (drv->pm->thaw_noirq)
549 error = drv->pm->thaw_noirq(dev);
550 } else {
551 pci_fixup_device(pci_fixup_resume_early, pci_dev);
552 error = pci_legacy_resume_early(dev);
553 }
554
555 return error;
556}
557
558static int pci_pm_poweroff(struct device *dev)
559{
560 struct device_driver *drv = dev->driver;
561 int error = 0;
562
563 pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
564
565 if (drv && drv->pm) {
566 if (drv->pm->poweroff) {
567 error = drv->pm->poweroff(dev);
568 suspend_report_result(drv->pm->poweroff, error);
569 }
570 } else {
571 error = pci_legacy_suspend(dev, PMSG_HIBERNATE);
572 }
573
574 return error;
575}
576
577static int pci_pm_poweroff_noirq(struct device *dev)
578{
579 struct pci_dev *pci_dev = to_pci_dev(dev);
580 struct pci_driver *drv = pci_dev->driver;
581 int error = 0;
582
583 if (drv && drv->pm) {
584 if (drv->pm->poweroff_noirq) {
585 error = drv->pm->poweroff_noirq(dev);
586 suspend_report_result(drv->pm->poweroff_noirq, error);
587 }
588 } else {
589 error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
590 }
591
592 return error;
593}
594
595static int pci_pm_restore(struct device *dev)
596{
597 struct pci_dev *pci_dev = to_pci_dev(dev);
598 struct device_driver *drv = dev->driver;
599 int error;
600
601 if (drv && drv->pm) {
602 error = drv->pm->restore ? drv->pm->restore(dev) :
603 pci_default_pm_resume(pci_dev);
604 } else {
605 error = pci_legacy_resume(dev);
606 }
607 pci_fixup_device(pci_fixup_resume, pci_dev);
608
609 return error;
610}
611
612static int pci_pm_restore_noirq(struct device *dev)
613{
614 struct pci_dev *pci_dev = to_pci_dev(dev);
615 struct pci_driver *drv = pci_dev->driver;
616 int error = 0;
617
618 pci_fixup_device(pci_fixup_resume, pci_dev);
619
620 if (drv && drv->pm) {
621 if (drv->pm->restore_noirq)
622 error = drv->pm->restore_noirq(dev);
623 } else {
624 error = pci_legacy_resume_early(dev);
625 }
626 pci_fixup_device(pci_fixup_resume_early, pci_dev);
627
628 return error;
629}
630
631#else /* !CONFIG_HIBERNATION */
632
633#define pci_pm_freeze NULL
634#define pci_pm_freeze_noirq NULL
635#define pci_pm_thaw NULL
636#define pci_pm_thaw_noirq NULL
637#define pci_pm_poweroff NULL
638#define pci_pm_poweroff_noirq NULL
639#define pci_pm_restore NULL
640#define pci_pm_restore_noirq NULL
641
642#endif /* !CONFIG_HIBERNATION */
643
644struct pm_ext_ops pci_pm_ops = {
645 .base = {
646 .prepare = pci_pm_prepare,
647 .complete = pci_pm_complete,
648 .suspend = pci_pm_suspend,
649 .resume = pci_pm_resume,
650 .freeze = pci_pm_freeze,
651 .thaw = pci_pm_thaw,
652 .poweroff = pci_pm_poweroff,
653 .restore = pci_pm_restore,
654 },
655 .suspend_noirq = pci_pm_suspend_noirq,
656 .resume_noirq = pci_pm_resume_noirq,
657 .freeze_noirq = pci_pm_freeze_noirq,
658 .thaw_noirq = pci_pm_thaw_noirq,
659 .poweroff_noirq = pci_pm_poweroff_noirq,
660 .restore_noirq = pci_pm_restore_noirq,
661};
662
663#define PCI_PM_OPS_PTR &pci_pm_ops
664
665#else /* !CONFIG_PM_SLEEP */
666
667#define PCI_PM_OPS_PTR NULL
668
669#endif /* !CONFIG_PM_SLEEP */
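With pci_pm_ops wired into pci_bus_type below, a driver can publish the new callbacks through the pm field of struct pci_driver, which __pci_register_driver() (next hunk) copies into driver.pm; drivers that do not provide one keep going through the pci_legacy_* paths above. A minimal, hypothetical sketch follows: all foo_* names and the device ID are invented, and only the suspend/resume pair of the base ops is filled in.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the device, then save config space ourselves */
	pci_save_state(pdev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	return 0;
}

static struct pm_ext_ops foo_pm_ops = {
	.base = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	},
};

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void foo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
	.pm		= &foo_pm_ops,	/* picked up by __pci_register_driver() */
};

static int __init foo_init(void)
{
	return pci_register_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	pci_unregister_driver(&foo_driver);
}
module_exit(foo_exit);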
670
367/** 671/**
368 * __pci_register_driver - register a new pci driver 672 * __pci_register_driver - register a new pci driver
369 * @drv: the driver structure to register 673 * @drv: the driver structure to register
@@ -386,6 +690,9 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
386 drv->driver.owner = owner; 690 drv->driver.owner = owner;
387 drv->driver.mod_name = mod_name; 691 drv->driver.mod_name = mod_name;
388 692
693 if (drv->pm)
694 drv->driver.pm = &drv->pm->base;
695
389 spin_lock_init(&drv->dynids.lock); 696 spin_lock_init(&drv->dynids.lock);
390 INIT_LIST_HEAD(&drv->dynids.list); 697 INIT_LIST_HEAD(&drv->dynids.list);
391 698
@@ -511,12 +818,9 @@ struct bus_type pci_bus_type = {
511 .uevent = pci_uevent, 818 .uevent = pci_uevent,
512 .probe = pci_device_probe, 819 .probe = pci_device_probe,
513 .remove = pci_device_remove, 820 .remove = pci_device_remove,
514 .suspend = pci_device_suspend,
515 .suspend_late = pci_device_suspend_late,
516 .resume_early = pci_device_resume_early,
517 .resume = pci_device_resume,
518 .shutdown = pci_device_shutdown, 821 .shutdown = pci_device_shutdown,
519 .dev_attrs = pci_dev_attrs, 822 .dev_attrs = pci_dev_attrs,
823 .pm = PCI_PM_OPS_PTR,
520}; 824};
521 825
522static int __init pci_driver_init(void) 826static int __init pci_driver_init(void)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 75c60239cadd..44a46c92b721 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
3 *
4 * PCI Bus Services, see include/linux/pci.h for further explanation. 2 * PCI Bus Services, see include/linux/pci.h for further explanation.
5 * 3 *
6 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, 4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
@@ -19,6 +17,7 @@
19#include <linux/string.h> 17#include <linux/string.h>
20#include <linux/log2.h> 18#include <linux/log2.h>
21#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/pm_wakeup.h>
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 21#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include "pci.h" 22#include "pci.h"
24 23
@@ -378,74 +377,90 @@ pci_restore_bars(struct pci_dev *dev)
378 pci_update_resource(dev, &dev->resource[i], i); 377 pci_update_resource(dev, &dev->resource[i], i);
379} 378}
380 379
381int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); 380static struct pci_platform_pm_ops *pci_platform_pm;
382 381
383/** 382int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
384 * pci_set_power_state - Set the power state of a PCI device
385 * @dev: PCI device to be suspended
386 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
387 *
388 * Transition a device to a new power state, using the Power Management
389 * Capabilities in the device's config space.
390 *
391 * RETURN VALUE:
392 * -EINVAL if trying to enter a lower state than we're already in.
393 * 0 if we're already in the requested state.
394 * -EIO if device does not support PCI PM.
395 * 0 if we can successfully change the power state.
396 */
397int
398pci_set_power_state(struct pci_dev *dev, pci_power_t state)
399{ 383{
400 int pm, need_restore = 0; 384 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
401 u16 pmcsr, pmc; 385 || !ops->sleep_wake || !ops->can_wakeup)
386 return -EINVAL;
387 pci_platform_pm = ops;
388 return 0;
389}
402 390
403 /* bound the state we're entering */ 391static inline bool platform_pci_power_manageable(struct pci_dev *dev)
404 if (state > PCI_D3hot) 392{
405 state = PCI_D3hot; 393 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
394}
406 395
407 /* 396static inline int platform_pci_set_power_state(struct pci_dev *dev,
408 * If the device or the parent bridge can't support PCI PM, ignore 397 pci_power_t t)
409 * the request if we're doing anything besides putting it into D0 398{
410 * (which would only happen on boot). 399 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
411 */ 400}
412 if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
413 return 0;
414 401
415 /* find PCI PM capability in list */ 402static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
416 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 403{
404 return pci_platform_pm ?
405 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
406}
417 407
418 /* abort if the device doesn't support PM capabilities */ 408static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
419 if (!pm) 409{
410 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
411}
412
413static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
414{
415 return pci_platform_pm ?
416 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
417}
418
419/**
420 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
421 * given PCI device
422 * @dev: PCI device to handle.
423 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
424 *
425 * RETURN VALUE:
426 * -EINVAL if the requested state is invalid.
427 * -EIO if device does not support PCI PM or its PM capabilities register has a
428 * wrong version, or device doesn't support the requested state.
429 * 0 if device already is in the requested state.
430 * 0 if device's power state has been successfully changed.
431 */
432static int
433pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
434{
435 u16 pmcsr;
436 bool need_restore = false;
437
438 if (!dev->pm_cap)
420 return -EIO; 439 return -EIO;
421 440
441 if (state < PCI_D0 || state > PCI_D3hot)
442 return -EINVAL;
443
422 /* Validate current state: 444 /* Validate current state:
423 * Can enter D0 from any state, but we can only go deeper 445 * Can enter D0 from any state, but we can only go deeper
424 * to sleep if we're already in a low power state 446 * to sleep if we're already in a low power state
425 */ 447 */
426 if (state != PCI_D0 && dev->current_state > state) { 448 if (dev->current_state == state) {
427 printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", 449 /* we're already there */
428 __func__, pci_name(dev), state, dev->current_state); 450 return 0;
451 } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
452 && dev->current_state > state) {
453 dev_err(&dev->dev, "invalid power transition "
454 "(from state %d to %d)\n", dev->current_state, state);
429 return -EINVAL; 455 return -EINVAL;
430 } else if (dev->current_state == state)
431 return 0; /* we're already there */
432
433
434 pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
435 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
436 printk(KERN_DEBUG
437 "PCI: %s has unsupported PM cap regs version (%u)\n",
438 pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
439 return -EIO;
440 } 456 }
441 457
442 /* check if this device supports the desired state */ 458 /* check if this device supports the desired state */
443 if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) 459 if ((state == PCI_D1 && !dev->d1_support)
444 return -EIO; 460 || (state == PCI_D2 && !dev->d2_support))
445 else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
446 return -EIO; 461 return -EIO;
447 462
448 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); 463 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
449 464
450 /* If we're (effectively) in D3, force entire word to 0. 465 /* If we're (effectively) in D3, force entire word to 0.
451 * This doesn't affect PME_Status, disables PME_En, and 466 * This doesn't affect PME_Status, disables PME_En, and
@@ -461,7 +476,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
461 case PCI_UNKNOWN: /* Boot-up */ 476 case PCI_UNKNOWN: /* Boot-up */
462 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 477 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
463 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 478 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
464 need_restore = 1; 479 need_restore = true;
465 /* Fall-through: force to D0 */ 480 /* Fall-through: force to D0 */
466 default: 481 default:
467 pmcsr = 0; 482 pmcsr = 0;
@@ -469,7 +484,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
469 } 484 }
470 485
471 /* enter specified state */ 486 /* enter specified state */
472 pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); 487 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
473 488
474 /* Mandatory power management transition delays */ 489 /* Mandatory power management transition delays */
475 /* see PCI PM 1.1 5.6.1 table 18 */ 490 /* see PCI PM 1.1 5.6.1 table 18 */
@@ -478,13 +493,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
478 else if (state == PCI_D2 || dev->current_state == PCI_D2) 493 else if (state == PCI_D2 || dev->current_state == PCI_D2)
479 udelay(200); 494 udelay(200);
480 495
481 /*
482 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
483 * Firmware method after native method ?
484 */
485 if (platform_pci_set_power_state)
486 platform_pci_set_power_state(dev, state);
487
488 dev->current_state = state; 496 dev->current_state = state;
489 497
490 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 498 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -508,8 +516,77 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
508 return 0; 516 return 0;
509} 517}
510 518
511pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev); 519/**
512 520 * pci_update_current_state - Read PCI power state of given device from its
521 * PCI PM registers and cache it
522 * @dev: PCI device to handle.
523 */
524static void pci_update_current_state(struct pci_dev *dev)
525{
526 if (dev->pm_cap) {
527 u16 pmcsr;
528
529 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
530 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
531 }
532}
533
534/**
535 * pci_set_power_state - Set the power state of a PCI device
536 * @dev: PCI device to handle.
537 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
538 *
539 * Transition a device to a new power state, using the platform firmware and/or
540 * the device's PCI PM registers.
541 *
542 * RETURN VALUE:
543 * -EINVAL if the requested state is invalid.
544 * -EIO if device does not support PCI PM or its PM capabilities register has a
545 * wrong version, or device doesn't support the requested state.
546 * 0 if device already is in the requested state.
547 * 0 if device's power state has been successfully changed.
548 */
549int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
550{
551 int error;
552
553 /* bound the state we're entering */
554 if (state > PCI_D3hot)
555 state = PCI_D3hot;
556 else if (state < PCI_D0)
557 state = PCI_D0;
558 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
559 /*
560 * If the device or the parent bridge do not support PCI PM,
561 * ignore the request if we're doing anything other than putting
562 * it into D0 (which would only happen on boot).
563 */
564 return 0;
565
566 if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
567 /*
568 * Allow the platform to change the state, for example via ACPI
569 * _PR0, _PS0 and some such, but do not trust it.
570 */
571 int ret = platform_pci_set_power_state(dev, PCI_D0);
572 if (!ret)
573 pci_update_current_state(dev);
574 }
575
576 error = pci_raw_set_power_state(dev, state);
577
578 if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
579 /* Allow the platform to finalize the transition */
580 int ret = platform_pci_set_power_state(dev, state);
581 if (!ret) {
582 pci_update_current_state(dev);
583 error = 0;
584 }
585 }
586
587 return error;
588}
589
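The reordering above makes the D0 path consult the platform before touching the PM registers (so ACPI can restore power first), while the low-power path programs the PM registers first and lets the platform finalize the transition. Nothing changes for callers; the usual legacy suspend/resume sequence still looks like the sketch below, where every mydev_* name is illustrative.

static int mydev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* both the platform firmware and the PM registers get a say now */
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int mydev_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}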
513/** 590/**
514 * pci_choose_state - Choose the power state of a PCI device 591 * pci_choose_state - Choose the power state of a PCI device
515 * @dev: PCI device to be suspended 592 * @dev: PCI device to be suspended
@@ -527,11 +604,9 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
527 if (!pci_find_capability(dev, PCI_CAP_ID_PM)) 604 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
528 return PCI_D0; 605 return PCI_D0;
529 606
530 if (platform_pci_choose_state) { 607 ret = platform_pci_choose_state(dev);
531 ret = platform_pci_choose_state(dev); 608 if (ret != PCI_POWER_ERROR)
532 if (ret != PCI_POWER_ERROR) 609 return ret;
533 return ret;
534 }
535 610
536 switch (state.event) { 611 switch (state.event) {
537 case PM_EVENT_ON: 612 case PM_EVENT_ON:
@@ -543,7 +618,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
543 case PM_EVENT_HIBERNATE: 618 case PM_EVENT_HIBERNATE:
544 return PCI_D3hot; 619 return PCI_D3hot;
545 default: 620 default:
546 printk("Unrecognized suspend event %d\n", state.event); 621 dev_info(&dev->dev, "unrecognized suspend event %d\n",
622 state.event);
547 BUG(); 623 BUG();
548 } 624 }
549 return PCI_D0; 625 return PCI_D0;
@@ -568,7 +644,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
568 else 644 else
569 found = 1; 645 found = 1;
570 if (!save_state) { 646 if (!save_state) {
571 dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); 647 dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
572 return -ENOMEM; 648 return -ENOMEM;
573 } 649 }
574 cap = (u16 *)&save_state->data[0]; 650 cap = (u16 *)&save_state->data[0];
@@ -619,7 +695,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
619 else 695 else
620 found = 1; 696 found = 1;
621 if (!save_state) { 697 if (!save_state) {
622 dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); 698 dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
623 return -ENOMEM; 699 return -ENOMEM;
624 } 700 }
625 cap = (u16 *)&save_state->data[0]; 701 cap = (u16 *)&save_state->data[0];
@@ -685,10 +761,9 @@ pci_restore_state(struct pci_dev *dev)
685 for (i = 15; i >= 0; i--) { 761 for (i = 15; i >= 0; i--) {
686 pci_read_config_dword(dev, i * 4, &val); 762 pci_read_config_dword(dev, i * 4, &val);
687 if (val != dev->saved_config_space[i]) { 763 if (val != dev->saved_config_space[i]) {
688 printk(KERN_DEBUG "PM: Writing back config space on " 764 dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
689 "device %s at offset %x (was %x, writing %x)\n", 765 "space at offset %#x (was %#x, writing %#x)\n",
690 pci_name(dev), i, 766 i, val, (int)dev->saved_config_space[i]);
691 val, (int)dev->saved_config_space[i]);
692 pci_write_config_dword(dev,i * 4, 767 pci_write_config_dword(dev,i * 4,
693 dev->saved_config_space[i]); 768 dev->saved_config_space[i]);
694 } 769 }
@@ -961,6 +1036,46 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
961} 1036}
962 1037
963/** 1038/**
1039 * pci_pme_capable - check the capability of PCI device to generate PME#
1040 * @dev: PCI device to handle.
1041 * @state: PCI state from which device will issue PME#.
1042 */
1043static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1044{
1045 if (!dev->pm_cap)
1046 return false;
1047
1048 return !!(dev->pme_support & (1 << state));
1049}
1050
1051/**
1052 * pci_pme_active - enable or disable PCI device's PME# function
1053 * @dev: PCI device to handle.
1054 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1055 *
1056 * The caller must verify that the device is capable of generating PME# before
1057 * calling this function with @enable equal to 'true'.
1058 */
1059static void pci_pme_active(struct pci_dev *dev, bool enable)
1060{
1061 u16 pmcsr;
1062
1063 if (!dev->pm_cap)
1064 return;
1065
1066 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1067 /* Clear PME_Status by writing 1 to it and enable PME# */
1068 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1069 if (!enable)
1070 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1071
1072 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1073
1074 dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
1075 enable ? "enabled" : "disabled");
1076}
1077
1078/**
964 * pci_enable_wake - enable PCI device as wakeup event source 1079 * pci_enable_wake - enable PCI device as wakeup event source
965 * @dev: PCI device affected 1080 * @dev: PCI device affected
966 * @state: PCI state from which device will issue wakeup events 1081 * @state: PCI state from which device will issue wakeup events
@@ -971,66 +1086,173 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
971 * called automatically by this routine. 1086 * called automatically by this routine.
972 * 1087 *
973 * Devices with legacy power management (no standard PCI PM capabilities) 1088 * Devices with legacy power management (no standard PCI PM capabilities)
974 * always require such platform hooks. Depending on the platform, devices 1089 * always require such platform hooks.
975 * supporting the standard PCI PME# signal may require such platform hooks;
976 * they always update bits in config space to allow PME# generation.
977 * 1090 *
978 * -EIO is returned if the device can't ever be a wakeup event source. 1091 * RETURN VALUE:
979 * -EINVAL is returned if the device can't generate wakeup events from 1092 * 0 is returned on success
980 * the specified PCI state. Returns zero if the operation is successful. 1093 * -EINVAL is returned if device is not supposed to wake up the system
1094 * Error code depending on the platform is returned if both the platform and
1095 * the native mechanism fail to enable the generation of wake-up events
981 */ 1096 */
982int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 1097int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
983{ 1098{
984 int pm; 1099 int error = 0;
985 int status; 1100 bool pme_done = false;
986 u16 value; 1101
987 1102 if (!device_may_wakeup(&dev->dev))
988 /* Note that drivers should verify device_may_wakeup(&dev->dev) 1103 return -EINVAL;
989 * before calling this function. Platform code should report 1104
990 * errors when drivers try to enable wakeup on devices that 1105 /*
991 * can't issue wakeups, or on which wakeups were disabled by 1106 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
992 * userspace updating the /sys/devices.../power/wakeup file. 1107 * Anderson we should be doing PME# wake enable followed by ACPI wake
1108 * enable. To disable wake-up we call the platform first, for symmetry.
993 */ 1109 */
994 1110
995 status = call_platform_enable_wakeup(&dev->dev, enable); 1111 if (!enable && platform_pci_can_wakeup(dev))
1112 error = platform_pci_sleep_wake(dev, false);
996 1113
997 /* find PCI PM capability in list */ 1114 if (!enable || pci_pme_capable(dev, state)) {
998 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1115 pci_pme_active(dev, enable);
1116 pme_done = true;
1117 }
999 1118
1000 /* If device doesn't support PM Capabilities, but caller wants to 1119 if (enable && platform_pci_can_wakeup(dev))
1001 * disable wake events, it's a NOP. Otherwise fail unless the 1120 error = platform_pci_sleep_wake(dev, true);
1002 * platform hooks handled this legacy device already.
1003 */
1004 if (!pm)
1005 return enable ? status : 0;
1006 1121
1007 /* Check device's ability to generate PME# */ 1122 return pme_done ? 0 : error;
1008 pci_read_config_word(dev,pm+PCI_PM_PMC,&value); 1123}
1009 1124
1010 value &= PCI_PM_CAP_PME_MASK; 1125/**
1011 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ 1126 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into
1127 * a sleep state
1128 * @dev: Device to handle.
1129 *
1130 * Choose the power state appropriate for the device depending on whether
1131 * it can wake up the system and/or is power manageable by the platform
1132 * (PCI_D3hot is the default) and put the device into that state.
1133 */
1134int pci_prepare_to_sleep(struct pci_dev *dev)
1135{
1136 pci_power_t target_state = PCI_D3hot;
1137 int error;
1012 1138
1013 /* Check if it can generate PME# from requested state. */ 1139 if (platform_pci_power_manageable(dev)) {
1014 if (!value || !(value & (1 << state))) { 1140 /*
1015 /* if it can't, revert what the platform hook changed, 1141 * Call the platform to choose the target state of the device
1016 * always reporting the base "EINVAL, can't PME#" error 1142 * and enable wake-up from this state if supported.
1017 */ 1143 */
1018 if (enable) 1144 pci_power_t state = platform_pci_choose_state(dev);
1019 call_platform_enable_wakeup(&dev->dev, 0); 1145
1020 return enable ? -EINVAL : 0; 1146 switch (state) {
1147 case PCI_POWER_ERROR:
1148 case PCI_UNKNOWN:
1149 break;
1150 case PCI_D1:
1151 case PCI_D2:
1152 if (pci_no_d1d2(dev))
1153 break;
1154 default:
1155 target_state = state;
1156 }
1157 } else if (device_may_wakeup(&dev->dev)) {
1158 /*
1159 * Find the deepest state from which the device can generate
1160 * wake-up events, make it the target state and enable device
1161 * to generate PME#.
1162 */
1163 if (!dev->pm_cap)
1164 return -EIO;
1165
1166 if (dev->pme_support) {
1167 while (target_state
1168 && !(dev->pme_support & (1 << target_state)))
1169 target_state--;
1170 }
1021 } 1171 }
1022 1172
1023 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); 1173 pci_enable_wake(dev, target_state, true);
1024 1174
1025 /* Clear PME_Status by writing 1 to it and enable PME# */ 1175 error = pci_set_power_state(dev, target_state);
1026 value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1027 1176
1028 if (!enable) 1177 if (error)
1029 value &= ~PCI_PM_CTRL_PME_ENABLE; 1178 pci_enable_wake(dev, target_state, false);
1030 1179
1031 pci_write_config_word(dev, pm + PCI_PM_CTRL, value); 1180 return error;
1181}
1032 1182
1033 return 0; 1183/**
1184 * pci_back_from_sleep - turn PCI device on during system-wide transition
1185 * from a sleep state into the working state
1186 * @dev: Device to handle.
1187 *
1188 * Disable the device's system wake-up capability and put it into D0.
1189 */
1190int pci_back_from_sleep(struct pci_dev *dev)
1191{
1192 pci_enable_wake(dev, PCI_D0, false);
1193 return pci_set_power_state(dev, PCI_D0);
1194}
1195
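pci_prepare_to_sleep() and pci_back_from_sleep() bundle the wake-up arming and the power-state choice that drivers previously open-coded around pci_enable_wake() and pci_set_power_state(). A hedged sketch of the intended call sites, using the legacy late/early hooks and invented mydrv_* names:

static int mydrv_suspend_late(struct pci_dev *pdev, pm_message_t state)
{
	/*
	 * Chooses the target state (platform preference, or the deepest
	 * PME#-capable state), arms wake-up and enters that state.
	 */
	return pci_prepare_to_sleep(pdev);
}

static int mydrv_resume_early(struct pci_dev *pdev)
{
	/* Disarms wake-up and brings the device back to D0. */
	return pci_back_from_sleep(pdev);
}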
1196/**
1197 * pci_pm_init - Initialize PM functions of given PCI device
1198 * @dev: PCI device to handle.
1199 */
1200void pci_pm_init(struct pci_dev *dev)
1201{
1202 int pm;
1203 u16 pmc;
1204
1205 dev->pm_cap = 0;
1206
1207 /* find PCI PM capability in list */
1208 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1209 if (!pm)
1210 return;
1211 /* Check device's ability to generate PME# */
1212 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1213
1214 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1215 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1216 pmc & PCI_PM_CAP_VER_MASK);
1217 return;
1218 }
1219
1220 dev->pm_cap = pm;
1221
1222 dev->d1_support = false;
1223 dev->d2_support = false;
1224 if (!pci_no_d1d2(dev)) {
1225 if (pmc & PCI_PM_CAP_D1) {
1226 dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n");
1227 dev->d1_support = true;
1228 }
1229 if (pmc & PCI_PM_CAP_D2) {
1230 dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n");
1231 dev->d2_support = true;
1232 }
1233 }
1234
1235 pmc &= PCI_PM_CAP_PME_MASK;
1236 if (pmc) {
1237 dev_printk(KERN_INFO, &dev->dev,
1238 "PME# supported from%s%s%s%s%s\n",
1239 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1240 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1241 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1242 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1243 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1244 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1245 /*
1246 * Make device's PM flags reflect the wake-up capability, but
1247 * let the user space enable it to wake up the system as needed.
1248 */
1249 device_set_wakeup_capable(&dev->dev, true);
1250 device_set_wakeup_enable(&dev->dev, false);
1251 /* Disable the PME# generation functionality */
1252 pci_pme_active(dev, false);
1253 } else {
1254 dev->pme_support = 0;
1255 }
1034} 1256}
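A short worked example of the PME# bookkeeping done by pci_pm_init() above; the PMC register value is made up for illustration.

/*
 * Example: a function whose PMC register reads 0xC803 reports capability
 * version 3 and PME# support from D0, D3hot and D3cold:
 *
 *   pmc & PCI_PM_CAP_PME_MASK == 0xC800
 *   dev->pme_support          == 0xC800 >> PCI_PM_CAP_PME_SHIFT == 0x19
 *
 * Bits 0, 3 and 4 of pme_support are set, so pci_pme_capable(dev, PCI_D3hot)
 * returns true (PCI_D3hot == 3) while pci_pme_capable(dev, PCI_D1) returns
 * false, and pci_prepare_to_sleep() above settles on D3hot for such a device.
 */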
1035 1257
1036int 1258int
@@ -1116,13 +1338,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1116 return 0; 1338 return 0;
1117 1339
1118err_out: 1340err_out:
1119 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx " 1341 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n",
1120 "for device %s\n", 1342 bar,
1121 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 1343 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1122 bar + 1, /* PCI BAR # */ 1344 (unsigned long long)pci_resource_start(pdev, bar),
1123 (unsigned long long)pci_resource_len(pdev, bar), 1345 (unsigned long long)pci_resource_end(pdev, bar));
1124 (unsigned long long)pci_resource_start(pdev, bar),
1125 pci_name(pdev));
1126 return -EBUSY; 1346 return -EBUSY;
1127} 1347}
1128 1348
@@ -1214,7 +1434,7 @@ pci_set_master(struct pci_dev *dev)
1214 1434
1215 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1435 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1216 if (! (cmd & PCI_COMMAND_MASTER)) { 1436 if (! (cmd & PCI_COMMAND_MASTER)) {
1217 pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); 1437 dev_dbg(&dev->dev, "enabling bus mastering\n");
1218 cmd |= PCI_COMMAND_MASTER; 1438 cmd |= PCI_COMMAND_MASTER;
1219 pci_write_config_word(dev, PCI_COMMAND, cmd); 1439 pci_write_config_word(dev, PCI_COMMAND, cmd);
1220 } 1440 }
@@ -1279,8 +1499,8 @@ pci_set_cacheline_size(struct pci_dev *dev)
1279 if (cacheline_size == pci_cache_line_size) 1499 if (cacheline_size == pci_cache_line_size)
1280 return 0; 1500 return 0;
1281 1501
1282 printk(KERN_DEBUG "PCI: cache line size of %d is not supported " 1502 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
1283 "by device %s\n", pci_cache_line_size << 2, pci_name(dev)); 1503 "supported\n", pci_cache_line_size << 2);
1284 1504
1285 return -EINVAL; 1505 return -EINVAL;
1286} 1506}
@@ -1305,8 +1525,7 @@ pci_set_mwi(struct pci_dev *dev)
1305 1525
1306 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1526 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1307 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 1527 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
1308 pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", 1528 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1309 pci_name(dev));
1310 cmd |= PCI_COMMAND_INVALIDATE; 1529 cmd |= PCI_COMMAND_INVALIDATE;
1311 pci_write_config_word(dev, PCI_COMMAND, cmd); 1530 pci_write_config_word(dev, PCI_COMMAND, cmd);
1312 } 1531 }
@@ -1702,5 +1921,7 @@ EXPORT_SYMBOL(pci_set_power_state);
1702EXPORT_SYMBOL(pci_save_state); 1921EXPORT_SYMBOL(pci_save_state);
1703EXPORT_SYMBOL(pci_restore_state); 1922EXPORT_SYMBOL(pci_restore_state);
1704EXPORT_SYMBOL(pci_enable_wake); 1923EXPORT_SYMBOL(pci_enable_wake);
1924EXPORT_SYMBOL(pci_prepare_to_sleep);
1925EXPORT_SYMBOL(pci_back_from_sleep);
1705EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); 1926EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1706 1927
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 312daff834b6..d807cd786f20 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -5,10 +5,36 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
5extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); 5extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
6extern void pci_cleanup_rom(struct pci_dev *dev); 6extern void pci_cleanup_rom(struct pci_dev *dev);
7 7
8/* Firmware callbacks */ 8/**
9extern pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev); 9 * Firmware PM callbacks
10extern int (*platform_pci_set_power_state)(struct pci_dev *dev, 10 *
11 pci_power_t state); 11 * @is_manageable - returns 'true' if given device is power manageable by the
12 * platform firmware
13 *
14 * @set_state - invokes the platform firmware to set the device's power state
15 *
16 * @choose_state - returns PCI power state of given device preferred by the
17 * platform; to be used during system-wide transitions from a
18 * sleeping state to the working state and vice versa
19 *
20 * @can_wakeup - returns 'true' if given device is capable of waking up the
21 * system from a sleeping state
22 *
23 * @sleep_wake - enables/disables the system wake up capability of given device
24 *
25 * If given platform is generally capable of power managing PCI devices, all of
26 * these callbacks are mandatory.
27 */
28struct pci_platform_pm_ops {
29 bool (*is_manageable)(struct pci_dev *dev);
30 int (*set_state)(struct pci_dev *dev, pci_power_t state);
31 pci_power_t (*choose_state)(struct pci_dev *dev);
32 bool (*can_wakeup)(struct pci_dev *dev);
33 int (*sleep_wake)(struct pci_dev *dev, bool enable);
34};
35
36extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
37extern void pci_pm_init(struct pci_dev *dev);
12 38
13extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 39extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
14extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 40extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
@@ -105,3 +131,16 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
105} 131}
106 132
107struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); 133struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
134
135/* PCI slot sysfs helper code */
136#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
137
138extern struct kset *pci_slots_kset;
139
140struct pci_slot_attribute {
141 struct attribute attr;
142 ssize_t (*show)(struct pci_slot *, char *);
143 ssize_t (*store)(struct pci_slot *, const char *, size_t);
144};
145#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
146
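The pci_slot sysfs helpers added to the private header above follow the usual kobject attribute pattern. Below is a hypothetical attribute, assuming struct pci_slot exposes its bus and slot number the way the new slot code does; the "address" name and format are only an illustration.

#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t address_read_file(struct pci_slot *slot, char *buf)
{
	return sprintf(buf, "%04x:%02x:%02x\n", pci_domain_nr(slot->bus),
		       slot->bus->number, slot->number);
}

static struct pci_slot_attribute pci_slot_attr_address =
	__ATTR(address, S_IRUGO, address_read_file, NULL);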
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 07c3bdb6edc2..77036f46acfe 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -26,6 +26,7 @@
26#include <linux/pcieport_if.h> 26#include <linux/pcieport_if.h>
27 27
28#include "aerdrv.h" 28#include "aerdrv.h"
29#include "../../pci.h"
29 30
30/* 31/*
31 * Version Information 32 * Version Information
@@ -219,8 +220,7 @@ static int __devinit aer_probe (struct pcie_device *dev,
219 220
220 /* Alloc rpc data structure */ 221 /* Alloc rpc data structure */
221 if (!(rpc = aer_alloc_rpc(dev))) { 222 if (!(rpc = aer_alloc_rpc(dev))) {
222 printk(KERN_DEBUG "%s: Alloc rpc fails on PCIE device[%s]\n", 223 dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
223 __func__, device->bus_id);
224 aer_remove(dev); 224 aer_remove(dev);
225 return -ENOMEM; 225 return -ENOMEM;
226 } 226 }
@@ -228,8 +228,7 @@ static int __devinit aer_probe (struct pcie_device *dev,
228 /* Request IRQ ISR */ 228 /* Request IRQ ISR */
229 if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", 229 if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv",
230 dev))) { 230 dev))) {
231 printk(KERN_DEBUG "%s: Request ISR fails on PCIE device[%s]\n", 231 dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
232 __func__, device->bus_id);
233 aer_remove(dev); 232 aer_remove(dev);
234 return status; 233 return status;
235 } 234 }
@@ -273,7 +272,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
273 * to issue Configuration Requests to those devices. 272 * to issue Configuration Requests to those devices.
274 */ 273 */
275 msleep(200); 274 msleep(200);
276 printk(KERN_DEBUG "Complete link reset at Root[%s]\n", dev->dev.bus_id); 275 dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
277 276
278 /* Enable Root Port's interrupt in response to error messages */ 277 /* Enable Root Port's interrupt in response to error messages */
279 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); 278 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
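The AER and port-driver hunks above and below are largely a logging conversion: bare printk() calls that formatted __func__ and bus_id by hand become dev_printk()/dev_dbg(), which prefix the driver and device names automatically and keep the message itself short. A before/after sketch (the wrapper function is invented):

/* before: caller spells out the device name itself
 *	printk(KERN_DEBUG "%s: Request ISR fails on PCIE device[%s]\n",
 *	       __func__, device->bus_id);
 * after: the core adds the "driver device:" prefix
 */
static void example_report_irq_failure(struct device *device)
{
	dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
}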
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index d39a78dbd026..30f581b8791f 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -50,10 +50,10 @@ int aer_osc_setup(struct pcie_device *pciedev)
50 } 50 }
51 51
52 if (ACPI_FAILURE(status)) { 52 if (ACPI_FAILURE(status)) {
53 printk(KERN_DEBUG "AER service couldn't init device %s - %s\n", 53 dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
54 pciedev->device.bus_id, 54 "init device: %s\n",
55 (status == AE_SUPPORT || status == AE_NOT_FOUND) ? 55 (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
56 "no _OSC support" : "Run ACPI _OSC fails"); 56 "no _OSC support" : "_OSC failed");
57 return -1; 57 return -1;
58 } 58 }
59 59
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aaa82392d1dc..ee5e7b5176d0 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -221,9 +221,9 @@ static void report_error_detected(struct pci_dev *dev, void *data)
221 * of a driver for this device is unaware of 221 * of a driver for this device is unaware of
222 * its hw state. 222 * its hw state.
223 */ 223 */
224 printk(KERN_DEBUG "Device ID[%s] has %s\n", 224 dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
225 dev->dev.bus_id, (dev->driver) ? 225 dev->driver ?
226 "no AER-aware driver" : "no driver"); 226 "no AER-aware driver" : "no driver");
227 } 227 }
228 return; 228 return;
229 } 229 }
@@ -304,7 +304,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
304{ 304{
305 struct aer_broadcast_data result_data; 305 struct aer_broadcast_data result_data;
306 306
307 printk(KERN_DEBUG "Broadcast %s message\n", error_mesg); 307 dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
308 result_data.state = state; 308 result_data.state = state;
309 if (cb == report_error_detected) 309 if (cb == report_error_detected)
310 result_data.result = PCI_ERS_RESULT_CAN_RECOVER; 310 result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
@@ -404,18 +404,16 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
404 data.aer_driver = 404 data.aer_driver =
405 to_service_driver(aerdev->device.driver); 405 to_service_driver(aerdev->device.driver);
406 } else { 406 } else {
407 printk(KERN_DEBUG "No link-reset support to Device ID" 407 dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
408 "[%s]\n", 408 "support\n");
409 dev->dev.bus_id);
410 return PCI_ERS_RESULT_DISCONNECT; 409 return PCI_ERS_RESULT_DISCONNECT;
411 } 410 }
412 } 411 }
413 412
414 status = data.aer_driver->reset_link(udev); 413 status = data.aer_driver->reset_link(udev);
415 if (status != PCI_ERS_RESULT_RECOVERED) { 414 if (status != PCI_ERS_RESULT_RECOVERED) {
416 printk(KERN_DEBUG "Link reset at upstream Device ID" 415 dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
417 "[%s] failed\n", 416 "device %s failed\n", pci_name(udev));
418 udev->dev.bus_id);
419 return PCI_ERS_RESULT_DISCONNECT; 417 return PCI_ERS_RESULT_DISCONNECT;
420 } 418 }
421 419
@@ -511,10 +509,12 @@ static void handle_error_source(struct pcie_device * aerdev,
511 } else { 509 } else {
512 status = do_recovery(aerdev, dev, info.severity); 510 status = do_recovery(aerdev, dev, info.severity);
513 if (status == PCI_ERS_RESULT_RECOVERED) { 511 if (status == PCI_ERS_RESULT_RECOVERED) {
514 printk(KERN_DEBUG "AER driver successfully recovered\n"); 512 dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
513 "successfully recovered\n");
515 } else { 514 } else {
516 /* TODO: Should kernel panic here? */ 515 /* TODO: Should kernel panic here? */
517 printk(KERN_DEBUG "AER driver didn't recover\n"); 516 dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
517 "recover\n");
518 } 518 }
519 } 519 }
520} 520}
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 3f0976868eda..359fe5568df1 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -13,6 +13,7 @@
13#include <linux/pm.h> 13#include <linux/pm.h>
14 14
15#include <linux/pcieport_if.h> 15#include <linux/pcieport_if.h>
16#include "portdrv.h"
16 17
17static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); 18static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
18static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); 19static int pcie_port_bus_suspend(struct device *dev, pm_message_t state);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index fb0abfa508dc..890f0d2b370a 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -23,20 +23,20 @@ static int pcie_port_probe_service(struct device *dev)
23{ 23{
24 struct pcie_device *pciedev; 24 struct pcie_device *pciedev;
25 struct pcie_port_service_driver *driver; 25 struct pcie_port_service_driver *driver;
26 int status = -ENODEV; 26 int status;
27 27
28 if (!dev || !dev->driver) 28 if (!dev || !dev->driver)
29 return status; 29 return -ENODEV;
30 30
31 driver = to_service_driver(dev->driver); 31 driver = to_service_driver(dev->driver);
32 if (!driver || !driver->probe) 32 if (!driver || !driver->probe)
33 return status; 33 return -ENODEV;
34 34
35 pciedev = to_pcie_device(dev); 35 pciedev = to_pcie_device(dev);
36 status = driver->probe(pciedev, driver->id_table); 36 status = driver->probe(pciedev, driver->id_table);
37 if (!status) { 37 if (!status) {
38 printk(KERN_DEBUG "Load service driver %s on pcie device %s\n", 38 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
39 driver->name, dev->bus_id); 39 driver->name);
40 get_device(dev); 40 get_device(dev);
41 } 41 }
42 return status; 42 return status;
@@ -53,8 +53,8 @@ static int pcie_port_remove_service(struct device *dev)
53 pciedev = to_pcie_device(dev); 53 pciedev = to_pcie_device(dev);
54 driver = to_service_driver(dev->driver); 54 driver = to_service_driver(dev->driver);
55 if (driver && driver->remove) { 55 if (driver && driver->remove) {
56 printk(KERN_DEBUG "Unload service driver %s on pcie device %s\n", 56 dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
57 driver->name, dev->bus_id); 57 driver->name);
58 driver->remove(pciedev); 58 driver->remove(pciedev);
59 put_device(dev); 59 put_device(dev);
60 } 60 }
@@ -103,7 +103,7 @@ static int pcie_port_resume_service(struct device *dev)
103 */ 103 */
104static void release_pcie_device(struct device *dev) 104static void release_pcie_device(struct device *dev)
105{ 105{
106 printk(KERN_DEBUG "Free Port Service[%s]\n", dev->bus_id); 106 dev_printk(KERN_DEBUG, dev, "free port service\n");
107 kfree(to_pcie_device(dev)); 107 kfree(to_pcie_device(dev));
108} 108}
109 109
@@ -150,7 +150,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
150 if (pos) { 150 if (pos) {
151 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 151 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] =
152 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 152 {{0, 0}, {0, 1}, {0, 2}, {0, 3}};
153 printk("%s Found MSIX capability\n", __func__); 153 dev_info(&dev->dev, "found MSI-X capability\n");
154 status = pci_enable_msix(dev, msix_entries, nvec); 154 status = pci_enable_msix(dev, msix_entries, nvec);
155 if (!status) { 155 if (!status) {
156 int j = 0; 156 int j = 0;
@@ -165,7 +165,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
165 if (status) { 165 if (status) {
166 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 166 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
167 if (pos) { 167 if (pos) {
168 printk("%s Found MSI capability\n", __func__); 168 dev_info(&dev->dev, "found MSI capability\n");
169 status = pci_enable_msi(dev); 169 status = pci_enable_msi(dev);
170 if (!status) { 170 if (!status) {
171 interrupt_mode = PCIE_PORT_MSI_MODE; 171 interrupt_mode = PCIE_PORT_MSI_MODE;
@@ -252,7 +252,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
252 return NULL; 252 return NULL;
253 253
254 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 254 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode);
255 printk(KERN_DEBUG "Allocate Port Service[%s]\n", device->device.bus_id); 255 dev_printk(KERN_DEBUG, &device->device, "allocate port service\n");
256 return device; 256 return device;
257} 257}
258 258
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 51d163238d93..367c9c20000d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -91,9 +91,8 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
91 91
92 pci_set_master(dev); 92 pci_set_master(dev);
93 if (!dev->irq && dev->pin) { 93 if (!dev->irq && dev->pin) {
94 printk(KERN_WARNING 94 dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; "
95 "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", 95 "check vendor BIOS\n", dev->vendor, dev->device);
96 __func__, dev->vendor, dev->device);
97 } 96 }
98 if (pcie_port_device_register(dev)) { 97 if (pcie_port_device_register(dev)) {
99 pci_disable_device(dev); 98 pci_disable_device(dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3706ce7972dd..b1724cf31b66 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -277,8 +277,8 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
277 res->end = res->start + sz64; 277 res->end = res->start + sz64;
278#else 278#else
279 if (sz64 > 0x100000000ULL) { 279 if (sz64 > 0x100000000ULL) {
280 printk(KERN_ERR "PCI: Unable to handle 64-bit " 280 dev_err(&dev->dev, "BAR %d: can't handle 64-bit"
281 "BAR for device %s\n", pci_name(dev)); 281 " BAR\n", pos);
282 res->start = 0; 282 res->start = 0;
283 res->flags = 0; 283 res->flags = 0;
284 } else if (lhi) { 284 } else if (lhi) {
@@ -329,7 +329,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
329 return; 329 return;
330 330
331 if (dev->transparent) { 331 if (dev->transparent) {
332 printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev)); 332 dev_info(&dev->dev, "transparent bridge\n");
333 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) 333 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
334 child->resource[i] = child->parent->resource[i - 3]; 334 child->resource[i] = child->parent->resource[i - 3];
335 } 335 }
@@ -392,7 +392,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
392 limit |= ((long) mem_limit_hi) << 32; 392 limit |= ((long) mem_limit_hi) << 32;
393#else 393#else
394 if (mem_base_hi || mem_limit_hi) { 394 if (mem_base_hi || mem_limit_hi) {
395 printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev)); 395 dev_err(&dev->dev, "can't handle 64-bit "
396 "address space for bridge\n");
396 return; 397 return;
397 } 398 }
398#endif 399#endif
@@ -414,6 +415,7 @@ static struct pci_bus * pci_alloc_bus(void)
414 INIT_LIST_HEAD(&b->node); 415 INIT_LIST_HEAD(&b->node);
415 INIT_LIST_HEAD(&b->children); 416 INIT_LIST_HEAD(&b->children);
416 INIT_LIST_HEAD(&b->devices); 417 INIT_LIST_HEAD(&b->devices);
418 INIT_LIST_HEAD(&b->slots);
417 } 419 }
418 return b; 420 return b;
419} 421}
@@ -511,8 +513,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
511 513
512 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 514 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
513 515
514 pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n", 516 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
515 pci_name(dev), buses & 0xffffff, pass); 517 buses & 0xffffff, pass);
516 518
517 /* Disable MasterAbortMode during probing to avoid reporting 519 /* Disable MasterAbortMode during probing to avoid reporting
518 of bus errors (in some architectures) */ 520 of bus errors (in some architectures) */
@@ -535,8 +537,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
535 * ignore it. This can happen with the i450NX chipset. 537 * ignore it. This can happen with the i450NX chipset.
536 */ 538 */
537 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 539 if (pci_find_bus(pci_domain_nr(bus), busnr)) {
538 printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", 540 dev_info(&dev->dev, "bus %04x:%02x already known\n",
539 pci_domain_nr(bus), busnr); 541 pci_domain_nr(bus), busnr);
540 goto out; 542 goto out;
541 } 543 }
542 544
@@ -711,8 +713,9 @@ static int pci_setup_device(struct pci_dev * dev)
711{ 713{
712 u32 class; 714 u32 class;
713 715
714 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 716 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
715 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); 717 dev->bus->number, PCI_SLOT(dev->devfn),
718 PCI_FUNC(dev->devfn));
716 719
717 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); 720 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
718 dev->revision = class & 0xff; 721 dev->revision = class & 0xff;
@@ -720,7 +723,7 @@ static int pci_setup_device(struct pci_dev * dev)
720 dev->class = class; 723 dev->class = class;
721 class >>= 8; 724 class >>= 8;
722 725
723 pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev), 726 dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n",
724 dev->vendor, dev->device, class, dev->hdr_type); 727 dev->vendor, dev->device, class, dev->hdr_type);
725 728
726 /* "Unknown power state" */ 729 /* "Unknown power state" */
@@ -788,13 +791,13 @@ static int pci_setup_device(struct pci_dev * dev)
788 break; 791 break;
789 792
790 default: /* unknown header */ 793 default: /* unknown header */
791 printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", 794 dev_err(&dev->dev, "unknown header type %02x, "
792 pci_name(dev), dev->hdr_type); 795 "ignoring device\n", dev->hdr_type);
793 return -1; 796 return -1;
794 797
795 bad: 798 bad:
796 printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", 799 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
797 pci_name(dev), class, dev->hdr_type); 800 "type %02x)\n", class, dev->hdr_type);
798 dev->class = PCI_CLASS_NOT_DEFINED; 801 dev->class = PCI_CLASS_NOT_DEFINED;
799 } 802 }
800 803
@@ -927,7 +930,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
927 return NULL; 930 return NULL;
928 /* Card hasn't responded in 60 seconds? Must be stuck. */ 931 /* Card hasn't responded in 60 seconds? Must be stuck. */
929 if (delay > 60 * 1000) { 932 if (delay > 60 * 1000) {
930 printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " 933 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
931 "responding\n", pci_domain_nr(bus), 934 "responding\n", pci_domain_nr(bus),
932 bus->number, PCI_SLOT(devfn), 935 bus->number, PCI_SLOT(devfn),
933 PCI_FUNC(devfn)); 936 PCI_FUNC(devfn));
@@ -984,6 +987,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
984 /* Fix up broken headers */ 987 /* Fix up broken headers */
985 pci_fixup_device(pci_fixup_header, dev); 988 pci_fixup_device(pci_fixup_header, dev);
986 989
990 /* Initialize power management of the device */
991 pci_pm_init(dev);
992
987 /* 993 /*
988 * Add the device to our list of discovered devices 994 * Add the device to our list of discovered devices
989 * and the bus list for fixup functions, etc. 995 * and the bus list for fixup functions, etc.
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 963a97642ae9..4400dffbd93a 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: proc.c,v 1.13 1998/05/12 07:36:07 mj Exp $
3 *
4 * Procfs interface for the PCI bus. 2 * Procfs interface for the PCI bus.
5 * 3 *
6 * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz> 4 * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
@@ -482,5 +480,5 @@ static int __init pci_proc_init(void)
482 return 0; 480 return 0;
483} 481}
484 482
485__initcall(pci_proc_init); 483device_initcall(pci_proc_init);
486 484
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 338a3f94b4d4..12d489395fad 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -556,7 +556,7 @@ static void quirk_via_ioapic(struct pci_dev *dev)
556 pci_write_config_byte (dev, 0x58, tmp); 556 pci_write_config_byte (dev, 0x58, tmp);
557} 557}
558DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); 558DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
559DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); 559DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
560 560
561/* 561/*
562 * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. 562 * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
@@ -576,7 +576,7 @@ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
576 } 576 }
577} 577}
578DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); 578DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
579DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); 579DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
580 580
581/* 581/*
582 * The AMD io apic can hang the box when an apic irq is masked. 582 * The AMD io apic can hang the box when an apic irq is masked.
@@ -622,7 +622,7 @@ static void quirk_amd_8131_ioapic(struct pci_dev *dev)
622 } 622 }
623} 623}
624DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); 624DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
625DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); 625DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
626#endif /* CONFIG_X86_IO_APIC */ 626#endif /* CONFIG_X86_IO_APIC */
627 627
628/* 628/*
@@ -774,7 +774,7 @@ static void quirk_cardbus_legacy(struct pci_dev *dev)
774 pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); 774 pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
775} 775}
776DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); 776DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
777DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); 777DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
778 778
779/* 779/*
780 * Following the PCI ordering rules is optional on the AMD762. I'm not 780 * Following the PCI ordering rules is optional on the AMD762. I'm not
@@ -797,7 +797,7 @@ static void quirk_amd_ordering(struct pci_dev *dev)
797 } 797 }
798} 798}
799DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); 799DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
800DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); 800DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
801 801
802/* 802/*
803 * DreamWorks provided workaround for Dunord I-3000 problem 803 * DreamWorks provided workaround for Dunord I-3000 problem
@@ -865,7 +865,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
865 } 865 }
866} 866}
867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
868DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 868DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
869 869
870static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) 870static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
871{ 871{
@@ -885,9 +885,9 @@ static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
885 } 885 }
886} 886}
887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
888DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 888DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
889DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 889DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
890DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 890DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
891 891
892/* 892/*
893 * Serverworks CSB5 IDE does not fully support native mode 893 * Serverworks CSB5 IDE does not fully support native mode
@@ -1054,6 +1054,20 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1054 * its on-board VGA controller */ 1054 * its on-board VGA controller */
1055 asus_hides_smbus = 1; 1055 asus_hides_smbus = 1;
1056 } 1056 }
1057 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG)
1058 switch(dev->subsystem_device) {
1059 case 0x00b8: /* Compaq Evo D510 CMT */
1060 case 0x00b9: /* Compaq Evo D510 SFF */
1061 asus_hides_smbus = 1;
1062 }
1063 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
1064 switch (dev->subsystem_device) {
1065 case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
1066 /* Motherboard doesn't have host bridge
1067 * subvendor/subdevice IDs, therefore checking
1068 * its on-board VGA controller */
1069 asus_hides_smbus = 1;
1070 }
1057 } 1071 }
1058} 1072}
1059DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); 1073DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
@@ -1068,6 +1082,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
1068DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); 1082DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1069 1083
1070DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); 1084DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1085DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge);
1086DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1071 1087
1072static void asus_hides_smbus_lpc(struct pci_dev *dev) 1088static void asus_hides_smbus_lpc(struct pci_dev *dev)
1073{ 1089{
@@ -1093,31 +1109,61 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); 1109DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); 1110DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1095DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); 1111DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1096DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); 1112DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1097DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); 1113DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1098DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); 1114DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1099DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); 1115DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1100DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); 1116DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1101DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); 1117DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1102DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); 1118DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1103 1119
1104static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) 1120/* It appears we just have one such device. If not, we have a warning */
1121static void __iomem *asus_rcba_base;
1122static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1105{ 1123{
1106 u32 val, rcba; 1124 u32 rcba;
1107 void __iomem *base;
1108 1125
1109 if (likely(!asus_hides_smbus)) 1126 if (likely(!asus_hides_smbus))
1110 return; 1127 return;
1128 WARN_ON(asus_rcba_base);
1129
1111 pci_read_config_dword(dev, 0xF0, &rcba); 1130 pci_read_config_dword(dev, 0xF0, &rcba);
1112 base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */ 1131 /* use bits 31:14, 16 kB aligned */
1113 if (base == NULL) return; 1132 asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
1114 val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */ 1133 if (asus_rcba_base == NULL)
1115 writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */ 1134 return;
1116 iounmap(base); 1135}
1136
1137static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1138{
1139 u32 val;
1140
1141 if (likely(!asus_hides_smbus || !asus_rcba_base))
1142 return;
1143 /* read the Function Disable register, dword mode only */
1144 val = readl(asus_rcba_base + 0x3418);
1145 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
1146}
1147
1148static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1149{
1150 if (likely(!asus_hides_smbus || !asus_rcba_base))
1151 return;
1152 iounmap(asus_rcba_base);
1153 asus_rcba_base = NULL;
1117 dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); 1154 dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
1118} 1155}
1156
1157static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1158{
1159 asus_hides_smbus_lpc_ich6_suspend(dev);
1160 asus_hides_smbus_lpc_ich6_resume_early(dev);
1161 asus_hides_smbus_lpc_ich6_resume(dev);
1162}
1119DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); 1163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
1120DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); 1164DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
1165DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
1166DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1121 1167
1122/* 1168/*
1123 * SiS 96x south bridge: BIOS typically hides SMBus device... 1169 * SiS 96x south bridge: BIOS typically hides SMBus device...
@@ -1135,10 +1181,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_
1135DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); 1181DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1136DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); 1182DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1137DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); 1183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1138DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); 1184DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1139DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); 1185DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1140DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); 1186DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1141DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); 1187DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1142 1188
1143/* 1189/*
1144 * ... This is further complicated by the fact that some SiS96x south 1190 * ... This is further complicated by the fact that some SiS96x south
@@ -1172,7 +1218,7 @@ static void quirk_sis_503(struct pci_dev *dev)
1172 quirk_sis_96x_smbus(dev); 1218 quirk_sis_96x_smbus(dev);
1173} 1219}
1174DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); 1220DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1175DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); 1221DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1176 1222
1177 1223
1178/* 1224/*
@@ -1205,7 +1251,7 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
1205 } 1251 }
1206} 1252}
1207DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); 1253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1208DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); 1254DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1209 1255
1210#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) 1256#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1211 1257
@@ -1270,12 +1316,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, qui
1270DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); 1316DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1271DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); 1317DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1272DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); 1318DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1273DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); 1319DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1274DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); 1320DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1275DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); 1321DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1276DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); 1322DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1277DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); 1323DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1278DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); 1324DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1279 1325
1280#endif 1326#endif
1281 1327
@@ -1521,6 +1567,10 @@ extern struct pci_fixup __start_pci_fixups_enable[];
1521extern struct pci_fixup __end_pci_fixups_enable[]; 1567extern struct pci_fixup __end_pci_fixups_enable[];
1522extern struct pci_fixup __start_pci_fixups_resume[]; 1568extern struct pci_fixup __start_pci_fixups_resume[];
1523extern struct pci_fixup __end_pci_fixups_resume[]; 1569extern struct pci_fixup __end_pci_fixups_resume[];
1570extern struct pci_fixup __start_pci_fixups_resume_early[];
1571extern struct pci_fixup __end_pci_fixups_resume_early[];
1572extern struct pci_fixup __start_pci_fixups_suspend[];
1573extern struct pci_fixup __end_pci_fixups_suspend[];
1524 1574
1525 1575
1526void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) 1576void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
@@ -1553,6 +1603,16 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1553 end = __end_pci_fixups_resume; 1603 end = __end_pci_fixups_resume;
1554 break; 1604 break;
1555 1605
1606 case pci_fixup_resume_early:
1607 start = __start_pci_fixups_resume_early;
1608 end = __end_pci_fixups_resume_early;
1609 break;
1610
1611 case pci_fixup_suspend:
1612 start = __start_pci_fixups_suspend;
1613 end = __end_pci_fixups_suspend;
1614 break;
1615
1556 default: 1616 default:
1557 /* stupid compiler warning, you would think with an enum... */ 1617 /* stupid compiler warning, you would think with an enum... */
1558 return; 1618 return;
@@ -1629,7 +1689,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
1629} 1689}
1630DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1690DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1631 quirk_nvidia_ck804_pcie_aer_ext_cap); 1691 quirk_nvidia_ck804_pcie_aer_ext_cap);
1632DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1692DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1633 quirk_nvidia_ck804_pcie_aer_ext_cap); 1693 quirk_nvidia_ck804_pcie_aer_ext_cap);
1634 1694
1635static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) 1695static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
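For illustration only (not part of this patch): the quirk conversions above move most chipset quirks from the plain resume pass to the new resume_early pass, and the ICH6 SMBus quirk is split into suspend, resume_early and resume phases. A minimal, hypothetical sketch of how a quirk hooks into the two new passes follows; the device ID 0x1234 and all function names here are invented for the example.

#include <linux/pci.h>

/* Hypothetical quirk: save a config register at suspend time and restore it
 * in the early resume pass, before ordinary .resume callbacks run. */
static u32 example_saved_cfg;

static void example_quirk_suspend(struct pci_dev *dev)
{
	pci_read_config_dword(dev, 0x40, &example_saved_cfg);
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, 0x1234, example_quirk_suspend);

static void example_quirk_resume_early(struct pci_dev *dev)
{
	pci_write_config_dword(dev, 0x40, example_saved_cfg);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234,
			       example_quirk_resume_early);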
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8ddb918f5f57..827c0a520e2b 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,13 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29 29
30#define DEBUG_CONFIG 1
31#if DEBUG_CONFIG
32#define DBG(x...) printk(x)
33#else
34#define DBG(x...)
35#endif
36
37static void pbus_assign_resources_sorted(struct pci_bus *bus) 30static void pbus_assign_resources_sorted(struct pci_bus *bus)
38{ 31{
39 struct pci_dev *dev; 32 struct pci_dev *dev;
@@ -81,8 +74,8 @@ void pci_setup_cardbus(struct pci_bus *bus)
81 struct pci_dev *bridge = bus->self; 74 struct pci_dev *bridge = bus->self;
82 struct pci_bus_region region; 75 struct pci_bus_region region;
83 76
84 printk("PCI: Bus %d, cardbus bridge: %s\n", 77 dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n",
85 bus->number, pci_name(bridge)); 78 pci_domain_nr(bus), bus->number);
86 79
87 pcibios_resource_to_bus(bridge, &region, bus->resource[0]); 80 pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
88 if (bus->resource[0]->flags & IORESOURCE_IO) { 81 if (bus->resource[0]->flags & IORESOURCE_IO) {
@@ -90,7 +83,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
90 * The IO resource is allocated a range twice as large as it 83 * The IO resource is allocated a range twice as large as it
91 * would normally need. This allows us to set both IO regs. 84 * would normally need. This allows us to set both IO regs.
92 */ 85 */
93 printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", 86 dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
94 (unsigned long)region.start, 87 (unsigned long)region.start,
95 (unsigned long)region.end); 88 (unsigned long)region.end);
96 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, 89 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
@@ -101,7 +94,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
101 94
102 pcibios_resource_to_bus(bridge, &region, bus->resource[1]); 95 pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
103 if (bus->resource[1]->flags & IORESOURCE_IO) { 96 if (bus->resource[1]->flags & IORESOURCE_IO) {
104 printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", 97 dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
105 (unsigned long)region.start, 98 (unsigned long)region.start,
106 (unsigned long)region.end); 99 (unsigned long)region.end);
107 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, 100 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
@@ -112,7 +105,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
112 105
113 pcibios_resource_to_bus(bridge, &region, bus->resource[2]); 106 pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
114 if (bus->resource[2]->flags & IORESOURCE_MEM) { 107 if (bus->resource[2]->flags & IORESOURCE_MEM) {
115 printk(KERN_INFO " PREFETCH window: 0x%08lx-0x%08lx\n", 108 dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n",
116 (unsigned long)region.start, 109 (unsigned long)region.start,
117 (unsigned long)region.end); 110 (unsigned long)region.end);
118 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, 111 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
@@ -123,7 +116,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
123 116
124 pcibios_resource_to_bus(bridge, &region, bus->resource[3]); 117 pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
125 if (bus->resource[3]->flags & IORESOURCE_MEM) { 118 if (bus->resource[3]->flags & IORESOURCE_MEM) {
126 printk(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", 119 dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
127 (unsigned long)region.start, 120 (unsigned long)region.start,
128 (unsigned long)region.end); 121 (unsigned long)region.end);
129 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, 122 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
@@ -151,7 +144,8 @@ static void pci_setup_bridge(struct pci_bus *bus)
151 struct pci_bus_region region; 144 struct pci_bus_region region;
152 u32 l, bu, lu, io_upper16; 145 u32 l, bu, lu, io_upper16;
153 146
154 DBG(KERN_INFO "PCI: Bridge: %s\n", pci_name(bridge)); 147 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
148 pci_domain_nr(bus), bus->number);
155 149
156 /* Set up the top and bottom of the PCI I/O segment for this bus. */ 150 /* Set up the top and bottom of the PCI I/O segment for this bus. */
157 pcibios_resource_to_bus(bridge, &region, bus->resource[0]); 151 pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
@@ -162,7 +156,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
162 l |= region.end & 0xf000; 156 l |= region.end & 0xf000;
163 /* Set up upper 16 bits of I/O base/limit. */ 157 /* Set up upper 16 bits of I/O base/limit. */
164 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 158 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
165 DBG(KERN_INFO " IO window: %04lx-%04lx\n", 159 dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n",
166 (unsigned long)region.start, 160 (unsigned long)region.start,
167 (unsigned long)region.end); 161 (unsigned long)region.end);
168 } 162 }
@@ -170,7 +164,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
170 /* Clear upper 16 bits of I/O base/limit. */ 164 /* Clear upper 16 bits of I/O base/limit. */
171 io_upper16 = 0; 165 io_upper16 = 0;
172 l = 0x00f0; 166 l = 0x00f0;
173 DBG(KERN_INFO " IO window: disabled.\n"); 167 dev_info(&bridge->dev, " IO window: disabled\n");
174 } 168 }
175 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ 169 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
176 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); 170 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -185,13 +179,13 @@ static void pci_setup_bridge(struct pci_bus *bus)
185 if (bus->resource[1]->flags & IORESOURCE_MEM) { 179 if (bus->resource[1]->flags & IORESOURCE_MEM) {
186 l = (region.start >> 16) & 0xfff0; 180 l = (region.start >> 16) & 0xfff0;
187 l |= region.end & 0xfff00000; 181 l |= region.end & 0xfff00000;
188 DBG(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", 182 dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
189 (unsigned long)region.start, 183 (unsigned long)region.start,
190 (unsigned long)region.end); 184 (unsigned long)region.end);
191 } 185 }
192 else { 186 else {
193 l = 0x0000fff0; 187 l = 0x0000fff0;
194 DBG(KERN_INFO " MEM window: disabled.\n"); 188 dev_info(&bridge->dev, " MEM window: disabled\n");
195 } 189 }
196 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); 190 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
197 191
@@ -208,13 +202,13 @@ static void pci_setup_bridge(struct pci_bus *bus)
208 l |= region.end & 0xfff00000; 202 l |= region.end & 0xfff00000;
209 bu = upper_32_bits(region.start); 203 bu = upper_32_bits(region.start);
210 lu = upper_32_bits(region.end); 204 lu = upper_32_bits(region.end);
211 DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n", 205 dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
212 (unsigned long long)region.start, 206 (unsigned long long)region.start,
213 (unsigned long long)region.end); 207 (unsigned long long)region.end);
214 } 208 }
215 else { 209 else {
216 l = 0x0000fff0; 210 l = 0x0000fff0;
217 DBG(KERN_INFO " PREFETCH window: disabled.\n"); 211 dev_info(&bridge->dev, " PREFETCH window: disabled\n");
218 } 212 }
219 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); 213 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
220 214
@@ -361,9 +355,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
361 align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; 355 align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start;
362 order = __ffs(align) - 20; 356 order = __ffs(align) - 20;
363 if (order > 11) { 357 if (order > 11) {
364 printk(KERN_WARNING "PCI: region %s/%d " 358 dev_warn(&dev->dev, "BAR %d too large: "
365 "too large: 0x%016llx-0x%016llx\n", 359 "%#016llx-%#016llx\n", i,
366 pci_name(dev), i,
367 (unsigned long long)r->start, 360 (unsigned long long)r->start,
368 (unsigned long long)r->end); 361 (unsigned long long)r->end);
369 r->flags = 0; 362 r->flags = 0;
@@ -529,8 +522,8 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus)
529 break; 522 break;
530 523
531 default: 524 default:
532 printk(KERN_INFO "PCI: not setting up bridge %s " 525 dev_info(&dev->dev, "not setting up bridge for bus "
533 "for bus %d\n", pci_name(dev), b->number); 526 "%04x:%02x\n", pci_domain_nr(b), b->number);
534 break; 527 break;
535 } 528 }
536 } 529 }
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 05ca2ed9eb51..aa795fd428de 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -47,8 +47,7 @@ pdev_fixup_irq(struct pci_dev *dev,
47 } 47 }
48 dev->irq = irq; 48 dev->irq = irq;
49 49
50 pr_debug("PCI: fixup irq: (%s) got %d\n", 50 dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
51 kobject_name(&dev->dev.kobj), dev->irq);
52 51
53 /* Always tell the device, so the driver knows what is 52 /* Always tell the device, so the driver knows what is
54 the real IRQ to use; the device does not use it. */ 53 the real IRQ to use; the device does not use it. */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7d35cdf4579f..1a5fc83c71b3 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -26,8 +26,7 @@
26#include "pci.h" 26#include "pci.h"
27 27
28 28
29void 29void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
30pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
31{ 30{
32 struct pci_bus_region region; 31 struct pci_bus_region region;
33 u32 new, check, mask; 32 u32 new, check, mask;
@@ -43,20 +42,20 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
43 /* 42 /*
44 * Ignore non-moveable resources. This might be legacy resources for 43 * Ignore non-moveable resources. This might be legacy resources for
45 * which no functional BAR register exists or another important 44 * which no functional BAR register exists or another important
46 * system resource we should better not move around in system address 45 * system resource we shouldn't move around.
47 * space.
48 */ 46 */
49 if (res->flags & IORESOURCE_PCI_FIXED) 47 if (res->flags & IORESOURCE_PCI_FIXED)
50 return; 48 return;
51 49
52 pcibios_resource_to_bus(dev, &region, res); 50 pcibios_resource_to_bus(dev, &region, res);
53 51
54 pr_debug(" got res [%llx:%llx] bus [%llx:%llx] flags %lx for " 52 dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] "
55 "BAR %d of %s\n", (unsigned long long)res->start, 53 "flags %#lx\n", resno,
54 (unsigned long long)res->start,
56 (unsigned long long)res->end, 55 (unsigned long long)res->end,
57 (unsigned long long)region.start, 56 (unsigned long long)region.start,
58 (unsigned long long)region.end, 57 (unsigned long long)region.end,
59 (unsigned long)res->flags, resno, pci_name(dev)); 58 (unsigned long)res->flags);
60 59
61 new = region.start | (res->flags & PCI_REGION_FLAG_MASK); 60 new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
62 if (res->flags & IORESOURCE_IO) 61 if (res->flags & IORESOURCE_IO)
@@ -81,9 +80,8 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
81 pci_read_config_dword(dev, reg, &check); 80 pci_read_config_dword(dev, reg, &check);
82 81
83 if ((new ^ check) & mask) { 82 if ((new ^ check) & mask) {
84 printk(KERN_ERR "PCI: Error while updating region " 83 dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n",
85 "%s/%d (%08x != %08x)\n", pci_name(dev), resno, 84 resno, new, check);
86 new, check);
87 } 85 }
88 86
89 if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == 87 if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
@@ -92,15 +90,14 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
92 pci_write_config_dword(dev, reg + 4, new); 90 pci_write_config_dword(dev, reg + 4, new);
93 pci_read_config_dword(dev, reg + 4, &check); 91 pci_read_config_dword(dev, reg + 4, &check);
94 if (check != new) { 92 if (check != new) {
95 printk(KERN_ERR "PCI: Error updating region " 93 dev_err(&dev->dev, "BAR %d: error updating "
96 "%s/%d (high %08x != %08x)\n", 94 "(high %#08x != %#08x)\n", resno, new, check);
97 pci_name(dev), resno, new, check);
98 } 95 }
99 } 96 }
100 res->flags &= ~IORESOURCE_UNSET; 97 res->flags &= ~IORESOURCE_UNSET;
101 pr_debug("PCI: moved device %s resource %d (%lx) to %x\n", 98 dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n",
102 pci_name(dev), resno, res->flags, 99 resno, (unsigned long long)region.start,
103 new & ~PCI_REGION_FLAG_MASK); 100 (unsigned long long)region.end, res->flags);
104} 101}
105 102
106int pci_claim_resource(struct pci_dev *dev, int resource) 103int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -117,10 +114,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
117 err = insert_resource(root, res); 114 err = insert_resource(root, res);
118 115
119 if (err) { 116 if (err) {
120 printk(KERN_ERR "PCI: %s region %d of %s %s [%llx:%llx]\n", 117 dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n",
121 root ? "Address space collision on" : 118 resource,
122 "No parent found for", 119 root ? "address space collision on" :
123 resource, dtype, pci_name(dev), 120 "no parent found for",
121 dtype,
124 (unsigned long long)res->start, 122 (unsigned long long)res->start,
125 (unsigned long long)res->end); 123 (unsigned long long)res->end);
126 } 124 }
@@ -140,11 +138,10 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
140 138
141 align = resource_alignment(res); 139 align = resource_alignment(res);
142 if (!align) { 140 if (!align) {
143 printk(KERN_ERR "PCI: Cannot allocate resource (bogus " 141 dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
144 "alignment) %d [%llx:%llx] (flags %lx) of %s\n", 142 "alignment) [%#llx-%#llx] flags %#lx\n",
145 resno, (unsigned long long)res->start, 143 resno, (unsigned long long)res->start,
146 (unsigned long long)res->end, res->flags, 144 (unsigned long long)res->end, res->flags);
147 pci_name(dev));
148 return -EINVAL; 145 return -EINVAL;
149 } 146 }
150 147
@@ -165,11 +162,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
165 } 162 }
166 163
167 if (ret) { 164 if (ret) {
168 printk(KERN_ERR "PCI: Failed to allocate %s resource " 165 dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
169 "#%d:%llx@%llx for %s\n", 166 "[%#llx-%#llx]\n", resno,
170 res->flags & IORESOURCE_IO ? "I/O" : "mem", 167 res->flags & IORESOURCE_IO ? "I/O" : "mem",
171 resno, (unsigned long long)size, 168 (unsigned long long)res->start,
172 (unsigned long long)res->start, pci_name(dev)); 169 (unsigned long long)res->end);
173 } else { 170 } else {
174 res->flags &= ~IORESOURCE_STARTALIGN; 171 res->flags &= ~IORESOURCE_STARTALIGN;
175 if (resno < PCI_BRIDGE_RESOURCES) 172 if (resno < PCI_BRIDGE_RESOURCES)
@@ -205,11 +202,11 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
205 } 202 }
206 203
207 if (ret) { 204 if (ret) {
208 printk(KERN_ERR "PCI: Failed to allocate %s resource " 205 dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
209 "#%d:%llx@%llx for %s\n", 206 "[%#llx-%#llx\n]", resno,
210 res->flags & IORESOURCE_IO ? "I/O" : "mem", 207 res->flags & IORESOURCE_IO ? "I/O" : "mem",
211 resno, (unsigned long long)(res->end - res->start + 1), 208 (unsigned long long)res->start,
212 (unsigned long long)res->start, pci_name(dev)); 209 (unsigned long long)res->end);
213 } else if (resno < PCI_BRIDGE_RESOURCES) { 210 } else if (resno < PCI_BRIDGE_RESOURCES) {
214 pci_update_resource(dev, res, resno); 211 pci_update_resource(dev, res, resno);
215 } 212 }
@@ -239,11 +236,10 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
239 236
240 r_align = resource_alignment(r); 237 r_align = resource_alignment(r);
241 if (!r_align) { 238 if (!r_align) {
242 printk(KERN_WARNING "PCI: bogus alignment of resource " 239 dev_warn(&dev->dev, "BAR %d: bogus alignment "
243 "%d [%llx:%llx] (flags %lx) of %s\n", 240 "[%#llx-%#llx] flags %#lx\n",
244 i, (unsigned long long)r->start, 241 i, (unsigned long long)r->start,
245 (unsigned long long)r->end, r->flags, 242 (unsigned long long)r->end, r->flags);
246 pci_name(dev));
247 continue; 243 continue;
248 } 244 }
249 for (list = head; ; list = list->next) { 245 for (list = head; ; list = list->next) {
@@ -291,7 +287,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
291 287
292 if (!r->parent) { 288 if (!r->parent) {
293 dev_err(&dev->dev, "device not available because of " 289 dev_err(&dev->dev, "device not available because of "
294 "BAR %d [%llx:%llx] collisions\n", i, 290 "BAR %d [%#llx-%#llx] collisions\n", i,
295 (unsigned long long) r->start, 291 (unsigned long long) r->start,
296 (unsigned long long) r->end); 292 (unsigned long long) r->end);
297 return -EINVAL; 293 return -EINVAL;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
new file mode 100644
index 000000000000..7e5b85cbd948
--- /dev/null
+++ b/drivers/pci/slot.c
@@ -0,0 +1,233 @@
1/*
2 * drivers/pci/slot.c
3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P.
5 * Alex Chiang <achiang@hp.com>
6 */
7
8#include <linux/kobject.h>
9#include <linux/pci.h>
10#include <linux/err.h>
11#include "pci.h"
12
13struct kset *pci_slots_kset;
14EXPORT_SYMBOL_GPL(pci_slots_kset);
15
16static ssize_t pci_slot_attr_show(struct kobject *kobj,
17 struct attribute *attr, char *buf)
18{
19 struct pci_slot *slot = to_pci_slot(kobj);
20 struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
21 return attribute->show ? attribute->show(slot, buf) : -EIO;
22}
23
24static ssize_t pci_slot_attr_store(struct kobject *kobj,
25 struct attribute *attr, const char *buf, size_t len)
26{
27 struct pci_slot *slot = to_pci_slot(kobj);
28 struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
29 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
30}
31
32static struct sysfs_ops pci_slot_sysfs_ops = {
33 .show = pci_slot_attr_show,
34 .store = pci_slot_attr_store,
35};
36
37static ssize_t address_read_file(struct pci_slot *slot, char *buf)
38{
39 if (slot->number == 0xff)
40 return sprintf(buf, "%04x:%02x\n",
41 pci_domain_nr(slot->bus),
42 slot->bus->number);
43 else
44 return sprintf(buf, "%04x:%02x:%02x\n",
45 pci_domain_nr(slot->bus),
46 slot->bus->number,
47 slot->number);
48}
49
50static void pci_slot_release(struct kobject *kobj)
51{
52 struct pci_slot *slot = to_pci_slot(kobj);
53
54 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
55 slot->bus->number, slot->number);
56
57 list_del(&slot->list);
58
59 kfree(slot);
60}
61
62static struct pci_slot_attribute pci_slot_attr_address =
63 __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
64
65static struct attribute *pci_slot_default_attrs[] = {
66 &pci_slot_attr_address.attr,
67 NULL,
68};
69
70static struct kobj_type pci_slot_ktype = {
71 .sysfs_ops = &pci_slot_sysfs_ops,
72 .release = &pci_slot_release,
73 .default_attrs = pci_slot_default_attrs,
74};
75
76/**
77 * pci_create_slot - create or increment refcount for physical PCI slot
78 * @parent: struct pci_bus of parent bridge
79 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
80 * @name: user visible string presented in /sys/bus/pci/slots/<name>
81 *
82 * PCI slots have first class attributes such as address, speed, width,
83 * and a &struct pci_slot is used to manage them. This interface will
84 * either return a new &struct pci_slot to the caller, or if the pci_slot
85 * already exists, its refcount will be incremented.
86 *
87 * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple.
88 *
89 * Placeholder slots:
90 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
91 * a slot. There is one notable exception - pSeries (rpaphp), where the
92 * @slot_nr cannot be determined until a device is actually inserted into
93 * the slot. In this scenario, the caller may pass -1 for @slot_nr.
94 *
95 * The following semantics are imposed when the caller passes @slot_nr ==
96 * -1. First, the check for existing %struct pci_slot is skipped, as the
97 * caller may know about several unpopulated slots on a given %struct
98 * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
99 * these slots is then determined by the @name parameter. We expect
100 * kobject_init_and_add() to warn us if the caller attempts to create
101 * multiple slots with the same name. The other change is user-visible:
102 * the 'address' file presented in sysfs will consist solely of a
103 * dddd:bb tuple, where dddd is the PCI domain of the
104 * %struct pci_bus and bb is the bus number. In other words, the devfn of
105 * the 'placeholder' slot will not be displayed.
106 */
107
108struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
109 const char *name)
110{
111 struct pci_slot *slot;
112 int err;
113
114 down_write(&pci_bus_sem);
115
116 if (slot_nr == -1)
117 goto placeholder;
118
119 /* If we've already created this slot, bump refcount and return. */
120 list_for_each_entry(slot, &parent->slots, list) {
121 if (slot->number == slot_nr) {
122 kobject_get(&slot->kobj);
123 pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n",
124 __func__,
125 atomic_read(&slot->kobj.kref.refcount),
126 pci_domain_nr(parent), parent->number,
127 slot_nr);
128 goto out;
129 }
130 }
131
132placeholder:
133 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
134 if (!slot) {
135 slot = ERR_PTR(-ENOMEM);
136 goto out;
137 }
138
139 slot->bus = parent;
140 slot->number = slot_nr;
141
142 slot->kobj.kset = pci_slots_kset;
143 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
144 "%s", name);
145 if (err) {
146 printk(KERN_ERR "Unable to register kobject %s\n", name);
147 goto err;
148 }
149
150 INIT_LIST_HEAD(&slot->list);
151 list_add(&slot->list, &parent->slots);
152
153 /* Don't care if debug printk has a -1 for slot_nr */
154 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
155 __func__, pci_domain_nr(parent), parent->number, slot_nr);
156
157 out:
158 up_write(&pci_bus_sem);
159 return slot;
160 err:
161 kfree(slot);
162 slot = ERR_PTR(err);
163 goto out;
164}
165EXPORT_SYMBOL_GPL(pci_create_slot);
166
167/**
168 * pci_update_slot_number - update %struct pci_slot -> number
169 * @slot - %struct pci_slot to update
170 * @slot_nr - new number for slot
171 *
172 * The primary purpose of this interface is to allow callers who earlier
173 * created a placeholder slot in pci_create_slot() by passing a -1 as
174 * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
175 */
176
177void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
178{
179 int name_count = 0;
180 struct pci_slot *tmp;
181
182 down_write(&pci_bus_sem);
183
184 list_for_each_entry(tmp, &slot->bus->slots, list) {
185 WARN_ON(tmp->number == slot_nr);
186 if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj)))
187 name_count++;
188 }
189
190 if (name_count > 1)
191 printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
192
193 slot->number = slot_nr;
194 up_write(&pci_bus_sem);
195}
196EXPORT_SYMBOL_GPL(pci_update_slot_number);
197
198/**
199 * pci_destroy_slot - decrement refcount for physical PCI slot
200 * @slot: struct pci_slot to decrement
201 *
202 * %struct pci_slot is refcounted, so destroying them is really easy; we
203 * just call kobject_put on its kobj and let our release methods do the
204 * rest.
205 */
206
207void pci_destroy_slot(struct pci_slot *slot)
208{
209 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
210 atomic_read(&slot->kobj.kref.refcount) - 1,
211 pci_domain_nr(slot->bus), slot->bus->number, slot->number);
212
213 down_write(&pci_bus_sem);
214 kobject_put(&slot->kobj);
215 up_write(&pci_bus_sem);
216}
217EXPORT_SYMBOL_GPL(pci_destroy_slot);
218
219static int pci_slot_init(void)
220{
221 struct kset *pci_bus_kset;
222
223 pci_bus_kset = bus_get_kset(&pci_bus_type);
224 pci_slots_kset = kset_create_and_add("slots", NULL,
225 &pci_bus_kset->kobj);
226 if (!pci_slots_kset) {
227 printk(KERN_ERR "PCI: Slot initialization failure\n");
228 return -ENOMEM;
229 }
230 return 0;
231}
232
233subsys_initcall(pci_slot_init);
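For illustration only (not part of this patch): the kernel-doc for pci_create_slot() above describes both the normal case and the -1 placeholder case used by rpaphp. A rough sketch of a hypothetical hotplug caller follows; the bus pointer, slot numbers and names are made up.

#include <linux/pci.h>
#include <linux/err.h>

/* Normal case: the physical slot number is known when the slot is created. */
static void example_known_slot(struct pci_bus *bus)
{
	struct pci_slot *slot = pci_create_slot(bus, 3, "3");

	if (IS_ERR(slot))
		return;
	/* /sys/bus/pci/slots/3/address now reads "<domain>:<bus>:03". */
	pci_destroy_slot(slot);		/* drop the reference when done */
}

/* Placeholder case: the number is unknown until a card is inserted, so the
 * slot is created with -1 and renumbered later. */
static void example_placeholder_slot(struct pci_bus *bus)
{
	struct pci_slot *slot = pci_create_slot(bus, -1, "SLOT-A");

	if (IS_ERR(slot))
		return;
	/* 'address' shows only "<domain>:<bus>" until the number is known. */
	pci_update_slot_number(slot, 5);
}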
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index db90a74f8714..a5ac0bc7f52e 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -259,6 +259,7 @@ struct acpi_device_perf {
259/* Wakeup Management */ 259/* Wakeup Management */
260struct acpi_device_wakeup_flags { 260struct acpi_device_wakeup_flags {
261 u8 valid:1; /* Can successfully enable wakeup? */ 261 u8 valid:1; /* Can successfully enable wakeup? */
262 u8 prepared:1; /* Has the wake-up capability been enabled? */
262 u8 run_wake:1; /* Run-Wake GPE devices */ 263 u8 run_wake:1; /* Run-Wake GPE devices */
263}; 264};
264 265
@@ -335,6 +336,8 @@ void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context);
335int acpi_bus_get_status(struct acpi_device *device); 336int acpi_bus_get_status(struct acpi_device *device);
336int acpi_bus_get_power(acpi_handle handle, int *state); 337int acpi_bus_get_power(acpi_handle handle, int *state);
337int acpi_bus_set_power(acpi_handle handle, int state); 338int acpi_bus_set_power(acpi_handle handle, int state);
339bool acpi_bus_power_manageable(acpi_handle handle);
340bool acpi_bus_can_wakeup(acpi_handle handle);
338#ifdef CONFIG_ACPI_PROC_EVENT 341#ifdef CONFIG_ACPI_PROC_EVENT
339int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); 342int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
340int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); 343int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
@@ -377,6 +380,7 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
377 380
378#ifdef CONFIG_PM_SLEEP 381#ifdef CONFIG_PM_SLEEP
379int acpi_pm_device_sleep_state(struct device *, int *); 382int acpi_pm_device_sleep_state(struct device *, int *);
383int acpi_pm_device_sleep_wake(struct device *, bool);
380#else /* !CONFIG_PM_SLEEP */ 384#else /* !CONFIG_PM_SLEEP */
381static inline int acpi_pm_device_sleep_state(struct device *d, int *p) 385static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
382{ 386{
@@ -384,6 +388,10 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
384 *p = ACPI_STATE_D0; 388 *p = ACPI_STATE_D0;
385 return ACPI_STATE_D3; 389 return ACPI_STATE_D3;
386} 390}
391static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
392{
393 return -ENODEV;
394}
387#endif /* !CONFIG_PM_SLEEP */ 395#endif /* !CONFIG_PM_SLEEP */
388 396
389#endif /* CONFIG_ACPI */ 397#endif /* CONFIG_ACPI */
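For illustration only (not part of this patch): acpi_pm_device_sleep_wake() gains a real prototype under CONFIG_PM_SLEEP and an inline -ENODEV stub otherwise, so callers can use it unconditionally. A hedged sketch of the calling pattern a bus-level suspend path might use; the helper name is invented, and the actual PCI-side user is wired up elsewhere in this series.

#include <acpi/acpi_bus.h>
#include <linux/pm_wakeup.h>

/* Hypothetical helper: ask the ACPI layer to arm (or disarm) wake-up for a
 * device around a system sleep transition. */
static int example_sleep_wake(struct device *dev, bool enable)
{
	if (enable && !device_may_wakeup(dev))
		return -EINVAL;

	/* Resolves to the inline -ENODEV stub when CONFIG_PM_SLEEP is not set. */
	return acpi_pm_device_sleep_wake(dev, enable);
}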
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 9757a040a505..e5f38e5ce86f 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -87,7 +87,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
87 -------------------------------------------------------------------------- */ 87 -------------------------------------------------------------------------- */
88 88
89#ifdef CONFIG_ACPI_POWER 89#ifdef CONFIG_ACPI_POWER
90int acpi_enable_wakeup_device_power(struct acpi_device *dev); 90int acpi_device_sleep_wake(struct acpi_device *dev,
91 int enable, int sleep_state, int dev_state);
92int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
91int acpi_disable_wakeup_device_power(struct acpi_device *dev); 93int acpi_disable_wakeup_device_power(struct acpi_device *dev);
92int acpi_power_get_inferred_state(struct acpi_device *device); 94int acpi_power_get_inferred_state(struct acpi_device *device);
93int acpi_power_transition(struct acpi_device *device, int state); 95int acpi_power_transition(struct acpi_device *device, int state);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index b20409404c7d..729f6b0a60e9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -86,6 +86,12 @@
86 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ 86 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
87 *(.pci_fixup_resume) \ 87 *(.pci_fixup_resume) \
88 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ 88 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
89 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
90 *(.pci_fixup_resume_early) \
91 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
92 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
93 *(.pci_fixup_suspend) \
94 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
89 } \ 95 } \
90 \ 96 \
91 /* Built-in firmware blobs */ \ 97 /* Built-in firmware blobs */ \
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 5b21485be573..80c775d9fe20 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -11,7 +11,11 @@ extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); 11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); 12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val); 13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
14extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
14 15
15extern int early_pci_allowed(void); 16extern int early_pci_allowed(void);
16 17
18extern unsigned int pci_early_dump_regs;
19extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
20extern void early_dump_pci_devices(void);
17#endif 21#endif
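Editor's note: for reference, a minimal sketch of how the early config accessors above are used (the new write_pci_config_16() fills the gap between the existing byte and dword writers); the function name and the choice of 00:00.0 are illustrative only:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/pci-direct.h>

static void __init exearly_pci_peek(void)
{
	u16 vendor;

	if (!early_pci_allowed())
		return;

	/* bus 0, slot 0, func 0, config offset 0x00 (vendor ID) */
	vendor = read_pci_config_16(0, 0, 0, 0x00);
	printk(KERN_INFO "early PCI: 00:00.0 vendor %04x\n", vendor);

	/* the early config space of all devices can also be dumped for
	 * debugging when pci_early_dump_regs is set */
	early_dump_pci_devices();
}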
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0601075d09a1..a17177639376 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
235int acpi_check_mem_region(resource_size_t start, resource_size_t n, 235int acpi_check_mem_region(resource_size_t start, resource_size_t n,
236 const char *name); 236 const char *name);
237 237
238#ifdef CONFIG_PM_SLEEP
239void __init acpi_old_suspend_ordering(void);
240#endif /* CONFIG_PM_SLEEP */
238#else /* CONFIG_ACPI */ 241#else /* CONFIG_ACPI */
239 242
240static inline int early_acpi_boot_init(void) 243static inline int early_acpi_boot_init(void)
diff --git a/include/linux/device.h b/include/linux/device.h
index 6a2d04c011bc..f71a78d123ae 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -68,6 +68,8 @@ struct bus_type {
68 int (*resume_early)(struct device *dev); 68 int (*resume_early)(struct device *dev);
69 int (*resume)(struct device *dev); 69 int (*resume)(struct device *dev);
70 70
71 struct pm_ext_ops *pm;
72
71 struct bus_type_private *p; 73 struct bus_type_private *p;
72}; 74};
73 75
@@ -131,6 +133,8 @@ struct device_driver {
131 int (*resume) (struct device *dev); 133 int (*resume) (struct device *dev);
132 struct attribute_group **groups; 134 struct attribute_group **groups;
133 135
136 struct pm_ops *pm;
137
134 struct driver_private *p; 138 struct driver_private *p;
135}; 139};
136 140
@@ -197,6 +201,8 @@ struct class {
197 201
198 int (*suspend)(struct device *dev, pm_message_t state); 202 int (*suspend)(struct device *dev, pm_message_t state);
199 int (*resume)(struct device *dev); 203 int (*resume)(struct device *dev);
204
205 struct pm_ops *pm;
200}; 206};
201 207
202extern int __must_check class_register(struct class *class); 208extern int __must_check class_register(struct class *class);
@@ -248,8 +254,11 @@ struct device_type {
248 struct attribute_group **groups; 254 struct attribute_group **groups;
249 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 255 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
250 void (*release)(struct device *dev); 256 void (*release)(struct device *dev);
257
251 int (*suspend)(struct device *dev, pm_message_t state); 258 int (*suspend)(struct device *dev, pm_message_t state);
252 int (*resume)(struct device *dev); 259 int (*resume)(struct device *dev);
260
261 struct pm_ops *pm;
253}; 262};
254 263
255/* interface for exporting device attributes */ 264/* interface for exporting device attributes */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d18b1dd49fab..a6a088e1a804 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -17,8 +17,7 @@
17#ifndef LINUX_PCI_H 17#ifndef LINUX_PCI_H
18#define LINUX_PCI_H 18#define LINUX_PCI_H
19 19
20/* Include the pci register defines */ 20#include <linux/pci_regs.h> /* The pci register defines */
21#include <linux/pci_regs.h>
22 21
23/* 22/*
24 * The PCI interface treats multi-function devices as independent 23 * The PCI interface treats multi-function devices as independent
@@ -49,12 +48,22 @@
49#include <linux/list.h> 48#include <linux/list.h>
50#include <linux/compiler.h> 49#include <linux/compiler.h>
51#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/kobject.h>
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <linux/device.h> 53#include <linux/device.h>
54 54
55/* Include the ID list */ 55/* Include the ID list */
56#include <linux/pci_ids.h> 56#include <linux/pci_ids.h>
57 57
58/* pci_slot represents a physical slot */
59struct pci_slot {
60 struct pci_bus *bus; /* The bus this slot is on */
61 struct list_head list; /* node in list of slots on this bus */
62 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
63 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
64 struct kobject kobj;
65};
66
58/* File state for mmap()s on /proc/bus/pci/X/Y */ 67/* File state for mmap()s on /proc/bus/pci/X/Y */
59enum pci_mmap_state { 68enum pci_mmap_state {
60 pci_mmap_io, 69 pci_mmap_io,
@@ -142,6 +151,7 @@ struct pci_dev {
142 151
143 void *sysdata; /* hook for sys-specific extension */ 152 void *sysdata; /* hook for sys-specific extension */
144 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ 153 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
154 struct pci_slot *slot; /* Physical slot this device is in */
145 155
146 unsigned int devfn; /* encoded device & function index */ 156 unsigned int devfn; /* encoded device & function index */
147 unsigned short vendor; 157 unsigned short vendor;
@@ -167,6 +177,13 @@ struct pci_dev {
167 pci_power_t current_state; /* Current operating state. In ACPI-speak, 177 pci_power_t current_state; /* Current operating state. In ACPI-speak,
168 this is D0-D3, D0 being fully functional, 178 this is D0-D3, D0 being fully functional,
169 and D3 being off. */ 179 and D3 being off. */
180 int pm_cap; /* PM capability offset in the
181 configuration space */
182 unsigned int pme_support:5; /* Bitmask of states from which PME#
183 can be generated */
184 unsigned int d1_support:1; /* Low power state D1 is supported */
185 unsigned int d2_support:1; /* Low power state D2 is supported */
186 unsigned int no_d1d2:1; /* Only allow D0 and D3 */
170 187
171#ifdef CONFIG_PCIEASPM 188#ifdef CONFIG_PCIEASPM
172 struct pcie_link_state *link_state; /* ASPM link state. */ 189 struct pcie_link_state *link_state; /* ASPM link state. */
@@ -191,7 +208,6 @@ struct pci_dev {
191 unsigned int is_added:1; 208 unsigned int is_added:1;
192 unsigned int is_busmaster:1; /* device is busmaster */ 209 unsigned int is_busmaster:1; /* device is busmaster */
193 unsigned int no_msi:1; /* device may not use msi */ 210 unsigned int no_msi:1; /* device may not use msi */
194 unsigned int no_d1d2:1; /* only allow d0 or d3 */
195 unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ 211 unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
196 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 212 unsigned int broken_parity_status:1; /* Device generates false positive parity */
197 unsigned int msi_enabled:1; 213 unsigned int msi_enabled:1;
@@ -267,6 +283,7 @@ struct pci_bus {
267 struct list_head children; /* list of child buses */ 283 struct list_head children; /* list of child buses */
268 struct list_head devices; /* list of devices on this bus */ 284 struct list_head devices; /* list of devices on this bus */
269 struct pci_dev *self; /* bridge device as seen by parent */ 285 struct pci_dev *self; /* bridge device as seen by parent */
286 struct list_head slots; /* list of slots on this bus */
270 struct resource *resource[PCI_BUS_NUM_RESOURCES]; 287 struct resource *resource[PCI_BUS_NUM_RESOURCES];
271 /* address space routed to this bus */ 288 /* address space routed to this bus */
272 289
@@ -328,7 +345,7 @@ struct pci_bus_region {
328struct pci_dynids { 345struct pci_dynids {
329 spinlock_t lock; /* protects list, index */ 346 spinlock_t lock; /* protects list, index */
330 struct list_head list; /* for IDs added at runtime */ 347 struct list_head list; /* for IDs added at runtime */
331 unsigned int use_driver_data:1; /* pci_driver->driver_data is used */ 348 unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
332}; 349};
333 350
334/* ---------------------------------------------------------------- */ 351/* ---------------------------------------------------------------- */
@@ -390,7 +407,7 @@ struct pci_driver {
390 int (*resume_early) (struct pci_dev *dev); 407 int (*resume_early) (struct pci_dev *dev);
391 int (*resume) (struct pci_dev *dev); /* Device woken up */ 408 int (*resume) (struct pci_dev *dev); /* Device woken up */
392 void (*shutdown) (struct pci_dev *dev); 409 void (*shutdown) (struct pci_dev *dev);
393 410 struct pm_ext_ops *pm;
394 struct pci_error_handlers *err_handler; 411 struct pci_error_handlers *err_handler;
395 struct device_driver driver; 412 struct device_driver driver;
396 struct pci_dynids dynids; 413 struct pci_dynids dynids;
@@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
489 struct pci_ops *ops, void *sysdata); 506 struct pci_ops *ops, void *sysdata);
490struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 507struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
491 int busnr); 508 int busnr);
509struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
510 const char *name);
511void pci_destroy_slot(struct pci_slot *slot);
512void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
492int pci_scan_slot(struct pci_bus *bus, int devfn); 513int pci_scan_slot(struct pci_bus *bus, int devfn);
493struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); 514struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
494void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); 515void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
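Editor's note: the slot interfaces above back the new physical-slot objects exposed under /sys/bus/pci/slots. A minimal sketch of how a caller (e.g. a hotplug driver or platform code) could create and later tear down a slot; the name, slot number and error handling are illustrative only:

#include <linux/err.h>
#include <linux/pci.h>

static struct pci_slot *exslot_add(struct pci_bus *bus, int slot_nr)
{
	struct pci_slot *slot;

	slot = pci_create_slot(bus, slot_nr, "example-slot");
	if (IS_ERR(slot))
		return NULL;	/* e.g. duplicate name or -ENOMEM */

	return slot;
}

static void exslot_remove(struct pci_slot *slot)
{
	pci_destroy_slot(slot);
}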
@@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev);
618int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 639int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
619pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 640pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
620int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); 641int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
642int pci_prepare_to_sleep(struct pci_dev *dev);
643int pci_back_from_sleep(struct pci_dev *dev);
621 644
622/* Functions for PCI Hotplug drivers to use */ 645/* Functions for PCI Hotplug drivers to use */
623int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); 646int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
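Editor's note: a minimal sketch of how a legacy-style PCI driver could use the two new helpers, which pick a target power state for the device and arm or disarm PME# according to device_may_wakeup(); the expci_* names are hypothetical:

#include <linux/pci.h>

static int expci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* chooses a low-power state and enables wakeup if the user asked for it */
	return pci_prepare_to_sleep(pdev);
}

static int expci_resume(struct pci_dev *pdev)
{
	/* back to D0, wakeup events disabled again */
	pci_back_from_sleep(pdev);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}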
@@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
839 return -EIO; 862 return -EIO;
840} 863}
841 864
865static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
866{
867 return -EIO;
868}
869
842static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, 870static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
843 unsigned int size) 871 unsigned int size)
844{ 872{
@@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
977/* If you want to know what to call your pci_dev, ask this function. 1005/* If you want to know what to call your pci_dev, ask this function.
978 * Again, it's a wrapper around the generic device. 1006 * Again, it's a wrapper around the generic device.
979 */ 1007 */
980static inline char *pci_name(struct pci_dev *pdev) 1008static inline const char *pci_name(struct pci_dev *pdev)
981{ 1009{
982 return pdev->dev.bus_id; 1010 return dev_name(&pdev->dev);
983} 1011}
984 1012
985 1013
@@ -1014,7 +1042,9 @@ enum pci_fixup_pass {
1014 pci_fixup_header, /* After reading configuration header */ 1042 pci_fixup_header, /* After reading configuration header */
1015 pci_fixup_final, /* Final phase of device fixups */ 1043 pci_fixup_final, /* Final phase of device fixups */
1016 pci_fixup_enable, /* pci_enable_device() time */ 1044 pci_fixup_enable, /* pci_enable_device() time */
1017 pci_fixup_resume, /* pci_enable_device() time */ 1045 pci_fixup_resume, /* pci_device_resume() */
 1046 pci_fixup_suspend, /* pci_device_suspend() */
1047 pci_fixup_resume_early, /* pci_device_resume_early() */
1018}; 1048};
1019 1049
1020/* Anonymous variables would be nice... */ 1050/* Anonymous variables would be nice... */
@@ -1036,6 +1066,12 @@ enum pci_fixup_pass {
1036#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1066#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1037 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1067 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1038 resume##vendor##device##hook, vendor, device, hook) 1068 resume##vendor##device##hook, vendor, device, hook)
1069#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1070 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1071 resume_early##vendor##device##hook, vendor, device, hook)
1072#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1073 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1074 suspend##vendor##device##hook, vendor, device, hook)
1039 1075
1040 1076
1041void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1077void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
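Editor's note: a minimal sketch of the two new fixup hooks; the vendor/device IDs and quirk bodies are purely illustrative. Suspend fixups run right before the device is suspended, resume_early fixups in the early resume phase (cf. pci_device_resume_early() above):

#include <linux/pci.h>

static void exquirk_suspend(struct pci_dev *dev)
{
	dev_info(&dev->dev, "suspend quirk applied\n");
}
/* 0x1234 is a placeholder device ID */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, 0x1234, exquirk_suspend);

static void exquirk_resume_early(struct pci_dev *dev)
{
	dev_info(&dev->dev, "early resume quirk applied\n");
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234,
			       exquirk_resume_early);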
@@ -1060,7 +1096,10 @@ extern int pci_pci_problems;
1060extern unsigned long pci_cardbus_io_size; 1096extern unsigned long pci_cardbus_io_size;
1061extern unsigned long pci_cardbus_mem_size; 1097extern unsigned long pci_cardbus_mem_size;
1062 1098
1063extern int pcibios_add_platform_entries(struct pci_dev *dev); 1099int pcibios_add_platform_entries(struct pci_dev *dev);
1100void pcibios_disable_device(struct pci_dev *dev);
1101int pcibios_set_pcie_reset_state(struct pci_dev *dev,
1102 enum pcie_reset_state state);
1064 1103
1065#ifdef CONFIG_PCI_MMCONFIG 1104#ifdef CONFIG_PCI_MMCONFIG
1066extern void __init pci_mmcfg_early_init(void); 1105extern void __init pci_mmcfg_early_init(void);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8f67e8f2a3cc..a08cd06b541a 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -95,9 +95,6 @@ struct hotplug_slot_attribute {
 95 * @get_adapter_status: Called to see if an adapter is present in the slot or not. 95 * @get_adapter_status: Called to see if an adapter is present in the slot or not.
96 * If this field is NULL, the value passed in the struct hotplug_slot_info 96 * If this field is NULL, the value passed in the struct hotplug_slot_info
97 * will be used when this value is requested by a user. 97 * will be used when this value is requested by a user.
98 * @get_address: Called to get pci address of a slot.
99 * If this field is NULL, the value passed in the struct hotplug_slot_info
100 * will be used when this value is requested by a user.
101 * @get_max_bus_speed: Called to get the max bus speed for a slot. 98 * @get_max_bus_speed: Called to get the max bus speed for a slot.
102 * If this field is NULL, the value passed in the struct hotplug_slot_info 99 * If this field is NULL, the value passed in the struct hotplug_slot_info
103 * will be used when this value is requested by a user. 100 * will be used when this value is requested by a user.
@@ -120,7 +117,6 @@ struct hotplug_slot_ops {
120 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); 117 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
121 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); 118 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
122 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); 119 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
123 int (*get_address) (struct hotplug_slot *slot, u32 *value);
124 int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); 120 int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
125 int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); 121 int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
126}; 122};
@@ -140,7 +136,6 @@ struct hotplug_slot_info {
140 u8 attention_status; 136 u8 attention_status;
141 u8 latch_status; 137 u8 latch_status;
142 u8 adapter_status; 138 u8 adapter_status;
143 u32 address;
144 enum pci_bus_speed max_bus_speed; 139 enum pci_bus_speed max_bus_speed;
145 enum pci_bus_speed cur_bus_speed; 140 enum pci_bus_speed cur_bus_speed;
146}; 141};
@@ -166,15 +161,14 @@ struct hotplug_slot {
166 161
167 /* Variables below this are for use only by the hotplug pci core. */ 162 /* Variables below this are for use only by the hotplug pci core. */
168 struct list_head slot_list; 163 struct list_head slot_list;
169 struct kobject kobj; 164 struct pci_slot *pci_slot;
170}; 165};
171#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) 166#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
172 167
173extern int pci_hp_register (struct hotplug_slot *slot); 168extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
174extern int pci_hp_deregister (struct hotplug_slot *slot); 169extern int pci_hp_deregister(struct hotplug_slot *slot);
175extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, 170extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
176 struct hotplug_slot_info *info); 171 struct hotplug_slot_info *info);
177extern struct kset *pci_hotplug_slots_kset;
178 172
179/* PCI Setting Record (Type 0) */ 173/* PCI Setting Record (Type 0) */
180struct hpp_type0 { 174struct hpp_type0 {
@@ -227,9 +221,9 @@ struct hotplug_params {
227#include <acpi/acpi.h> 221#include <acpi/acpi.h>
228#include <acpi/acpi_bus.h> 222#include <acpi/acpi_bus.h>
229#include <acpi/actypes.h> 223#include <acpi/actypes.h>
230extern acpi_status acpi_run_oshp(acpi_handle handle);
231extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, 224extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
232 struct hotplug_params *hpp); 225 struct hotplug_params *hpp);
226int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
233int acpi_root_bridge(acpi_handle handle); 227int acpi_root_bridge(acpi_handle handle);
234#endif 228#endif
235#endif 229#endif
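Editor's note: with get_address gone, a hotplug driver now hands the pci_bus and slot number to the core at registration time so it can create the matching struct pci_slot itself. A minimal sketch of the updated call, with hypothetical names:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static int exhp_register(struct hotplug_slot *hp_slot,
			 struct pci_bus *bus, int slot_nr)
{
	/* was: pci_hp_register(hp_slot); bus/devfn used to come from get_address() */
	return pci_hp_register(hp_slot, bus, slot_nr);
}

static void exhp_unregister(struct hotplug_slot *hp_slot)
{
	pci_hp_deregister(hp_slot);
}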
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index c0c1223c9194..19958b929905 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -231,6 +231,7 @@
231#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ 231#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
232#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ 232#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
233#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ 233#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
234#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
234#define PCI_PM_CTRL 4 /* PM control and status register */ 235#define PCI_PM_CTRL 4 /* PM control and status register */
235#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ 236#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
236#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ 237#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
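Editor's note: the new shift pairs with the existing PCI_PM_CAP_PME_MASK so the PCI core can condense the five PME_D* bits of the PMC register into the new pci_dev.pme_support bitfield (see the pci.h hunk above). A sketch of that conversion, assuming `pmc' holds the capability's PMC word:

#include <linux/types.h>
#include <linux/pci_regs.h>

static inline unsigned int expme_states(u16 pmc)
{
	/* bits 11..15 of PMC -> 5-bit mask indexed by D-state (D0..D3cold) */
	return (pmc & PCI_PM_CAP_PME_MASK) >> PCI_PM_CAP_PME_SHIFT;
}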
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 3261681c82a4..95ac21ab3a09 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -53,6 +53,7 @@ struct platform_driver {
53 int (*suspend_late)(struct platform_device *, pm_message_t state); 53 int (*suspend_late)(struct platform_device *, pm_message_t state);
54 int (*resume_early)(struct platform_device *); 54 int (*resume_early)(struct platform_device *);
55 int (*resume)(struct platform_device *); 55 int (*resume)(struct platform_device *);
56 struct pm_ext_ops *pm;
56 struct device_driver driver; 57 struct device_driver driver;
57}; 58};
58 59
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 39a7ee859b67..4ad9de94449a 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -112,7 +112,9 @@ typedef struct pm_message {
112 int event; 112 int event;
113} pm_message_t; 113} pm_message_t;
114 114
115/* 115/**
116 * struct pm_ops - device PM callbacks
117 *
116 * Several driver power state transitions are externally visible, affecting 118 * Several driver power state transitions are externally visible, affecting
117 * the state of pending I/O queues and (for drivers that touch hardware) 119 * the state of pending I/O queues and (for drivers that touch hardware)
118 * interrupts, wakeups, DMA, and other hardware state. There may also be 120 * interrupts, wakeups, DMA, and other hardware state. There may also be
@@ -120,6 +122,284 @@ typedef struct pm_message {
120 * to the rest of the driver stack (such as a driver that's ON gating off 122 * to the rest of the driver stack (such as a driver that's ON gating off
121 * clocks which are not in active use). 123 * clocks which are not in active use).
122 * 124 *
125 * The externally visible transitions are handled with the help of the following
126 * callbacks included in this structure:
127 *
128 * @prepare: Prepare the device for the upcoming transition, but do NOT change
129 * its hardware state. Prevent new children of the device from being
130 * registered after @prepare() returns (the driver's subsystem and
131 * generally the rest of the kernel is supposed to prevent new calls to the
132 * probe method from being made too once @prepare() has succeeded). If
133 * @prepare() detects a situation it cannot handle (e.g. registration of a
134 * child already in progress), it may return -EAGAIN, so that the PM core
135 * can execute it once again (e.g. after the new child has been registered)
136 * to recover from the race condition. This method is executed for all
137 * kinds of suspend transitions and is followed by one of the suspend
138 * callbacks: @suspend(), @freeze(), or @poweroff().
139 * The PM core executes @prepare() for all devices before starting to
140 * execute suspend callbacks for any of them, so drivers may assume all of
141 * the other devices to be present and functional while @prepare() is being
142 * executed. In particular, it is safe to make GFP_KERNEL memory
143 * allocations from within @prepare(). However, drivers may NOT assume
144 * anything about the availability of the user space at that time and it
145 * is not correct to request firmware from within @prepare() (it's too
146 * late to do that). [To work around this limitation, drivers may
147 * register suspend and hibernation notifiers that are executed before the
148 * freezing of tasks.]
149 *
150 * @complete: Undo the changes made by @prepare(). This method is executed for
151 * all kinds of resume transitions, following one of the resume callbacks:
152 * @resume(), @thaw(), @restore(). Also called if the state transition
153 * fails before the driver's suspend callback (@suspend(), @freeze(),
154 * @poweroff()) can be executed (e.g. if the suspend callback fails for one
155 * of the other devices that the PM core has unsuccessfully attempted to
156 * suspend earlier).
157 * The PM core executes @complete() after it has executed the appropriate
158 * resume callback for all devices.
159 *
160 * @suspend: Executed before putting the system into a sleep state in which the
161 * contents of main memory are preserved. Quiesce the device, put it into
162 * a low power state appropriate for the upcoming system state (such as
163 * PCI_D3hot), and enable wakeup events as appropriate.
164 *
165 * @resume: Executed after waking the system up from a sleep state in which the
166 * contents of main memory were preserved. Put the device into the
167 * appropriate state, according to the information saved in memory by the
168 * preceding @suspend(). The driver starts working again, responding to
169 * hardware events and software requests. The hardware may have gone
170 * through a power-off reset, or it may have maintained state from the
171 * previous suspend() which the driver may rely on while resuming. On most
172 * platforms, there are no restrictions on availability of resources like
173 * clocks during @resume().
174 *
175 * @freeze: Hibernation-specific, executed before creating a hibernation image.
176 * Quiesce operations so that a consistent image can be created, but do NOT
177 * otherwise put the device into a low power device state and do NOT emit
178 * system wakeup events. Save in main memory the device settings to be
179 * used by @restore() during the subsequent resume from hibernation or by
180 * the subsequent @thaw(), if the creation of the image or the restoration
181 * of main memory contents from it fails.
182 *
183 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
184 * if the creation of the image fails. Also executed after a failing
185 * attempt to restore the contents of main memory from such an image.
186 * Undo the changes made by the preceding @freeze(), so the device can be
187 * operated in the same way as immediately before the call to @freeze().
188 *
189 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
190 * Quiesce the device, put it into a low power state appropriate for the
191 * upcoming system state (such as PCI_D3hot), and enable wakeup events as
192 * appropriate.
193 *
194 * @restore: Hibernation-specific, executed after restoring the contents of main
195 * memory from a hibernation image. Driver starts working again,
196 * responding to hardware events and software requests. Drivers may NOT
 197 * make ANY assumptions about the hardware state right before @restore().
198 * On most platforms, there are no restrictions on availability of
199 * resources like clocks during @restore().
200 *
201 * All of the above callbacks, except for @complete(), return error codes.
202 * However, the error codes returned by the resume operations, @resume(),
203 * @thaw(), and @restore(), do not cause the PM core to abort the resume
204 * transition during which they are returned. The error codes returned in
 205 * those cases are only printed by the PM core to the system logs for debugging
206 * purposes. Still, it is recommended that drivers only return error codes
207 * from their resume methods in case of an unrecoverable failure (i.e. when the
208 * device being handled refuses to resume and becomes unusable) to allow us to
209 * modify the PM core in the future, so that it can avoid attempting to handle
210 * devices that failed to resume and their children.
211 *
212 * It is allowed to unregister devices while the above callbacks are being
213 * executed. However, it is not allowed to unregister a device from within any
214 * of its own callbacks.
215 */
216
217struct pm_ops {
218 int (*prepare)(struct device *dev);
219 void (*complete)(struct device *dev);
220 int (*suspend)(struct device *dev);
221 int (*resume)(struct device *dev);
222 int (*freeze)(struct device *dev);
223 int (*thaw)(struct device *dev);
224 int (*poweroff)(struct device *dev);
225 int (*restore)(struct device *dev);
226};
227
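Editor's note: a minimal sketch of a driver providing the new callbacks through the `pm' pointer added to struct device_driver (and mirrored in bus_type, class and device_type above); the exdrv_* names are hypothetical, and callbacks left NULL are simply skipped by the PM core:

#include <linux/device.h>
#include <linux/pm.h>

static int exdrv_suspend(struct device *dev)
{
	/* quiesce the device, enable wakeup, pick a low-power state */
	return 0;
}

static int exdrv_resume(struct device *dev)
{
	/* bring the device back to full functionality */
	return 0;
}

static struct pm_ops exdrv_pm_ops = {
	.suspend = exdrv_suspend,
	.resume	 = exdrv_resume,
	/* .freeze/.thaw/.poweroff/.restore only matter for hibernation */
};

static struct device_driver exdrv_driver = {
	.name = "exdrv",
	.pm   = &exdrv_pm_ops,
};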
228/**
229 * struct pm_ext_ops - extended device PM callbacks
230 *
231 * Some devices require certain operations related to suspend and hibernation
232 * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
233 * is defined, adding callbacks to be executed with interrupts disabled to
234 * 'struct pm_ops'.
235 *
236 * The following callbacks included in 'struct pm_ext_ops' are executed with
237 * the nonboot CPUs switched off and with interrupts disabled on the only
 238 * functional CPU. They are also executed with the PM core list of devices
239 * locked, so they must NOT unregister any devices.
240 *
241 * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
242 * actions required for suspending the device that need interrupts to be
243 * disabled
244 *
245 * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
246 * actions required for resuming the device that need interrupts to be
247 * disabled
248 *
249 * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
250 * actions required for freezing the device that need interrupts to be
251 * disabled
252 *
253 * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
254 * actions required for thawing the device that need interrupts to be
255 * disabled
256 *
257 * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
258 * actions required for handling the device that need interrupts to be
259 * disabled
260 *
261 * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
262 * actions required for restoring the operations of the device that need
263 * interrupts to be disabled
264 *
265 * All of the above callbacks return error codes, but the error codes returned
266 * by the resume operations, @resume_noirq(), @thaw_noirq(), and
267 * @restore_noirq(), do not cause the PM core to abort the resume transition
 268 * during which they are returned. The error codes returned in those cases are
269 * only printed by the PM core to the system logs for debugging purposes.
270 * Still, as stated above, it is recommended that drivers only return error
271 * codes from their resume methods if the device being handled fails to resume
272 * and is not usable any more.
273 */
274
275struct pm_ext_ops {
276 struct pm_ops base;
277 int (*suspend_noirq)(struct device *dev);
278 int (*resume_noirq)(struct device *dev);
279 int (*freeze_noirq)(struct device *dev);
280 int (*thaw_noirq)(struct device *dev);
281 int (*poweroff_noirq)(struct device *dev);
282 int (*restore_noirq)(struct device *dev);
283};
284
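Editor's note: and a corresponding sketch of pm_ext_ops for a bus (or PCI/platform driver) that also needs a late step run with the nonboot CPUs off and interrupts disabled, as described above; the exbus_* names are hypothetical:

#include <linux/device.h>
#include <linux/pm.h>

static int exbus_suspend(struct device *dev)        { return 0; }
static int exbus_resume(struct device *dev)         { return 0; }
static int exbus_suspend_noirq(struct device *dev)  { return 0; }
static int exbus_resume_noirq(struct device *dev)   { return 0; }

static struct pm_ext_ops exbus_pm_ops = {
	.base = {
		.suspend = exbus_suspend,	/* normal phase, interrupts on */
		.resume	 = exbus_resume,
	},
	.suspend_noirq = exbus_suspend_noirq,	/* late phase, interrupts off */
	.resume_noirq  = exbus_resume_noirq,	/* early phase, interrupts off */
};

static struct bus_type exbus_bus_type = {
	.name = "exbus",
	.pm   = &exbus_pm_ops,
};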
285/**
286 * PM_EVENT_ messages
287 *
288 * The following PM_EVENT_ messages are defined for the internal use of the PM
289 * core, in order to provide a mechanism allowing the high level suspend and
290 * hibernation code to convey the necessary information to the device PM core
291 * code:
292 *
293 * ON No transition.
294 *
295 * FREEZE System is going to hibernate, call ->prepare() and ->freeze()
296 * for all devices.
297 *
298 * SUSPEND System is going to suspend, call ->prepare() and ->suspend()
299 * for all devices.
300 *
301 * HIBERNATE Hibernation image has been saved, call ->prepare() and
302 * ->poweroff() for all devices.
303 *
304 * QUIESCE Contents of main memory are going to be restored from a (loaded)
305 * hibernation image, call ->prepare() and ->freeze() for all
306 * devices.
307 *
308 * RESUME System is resuming, call ->resume() and ->complete() for all
309 * devices.
310 *
311 * THAW Hibernation image has been created, call ->thaw() and
312 * ->complete() for all devices.
313 *
314 * RESTORE Contents of main memory have been restored from a hibernation
315 * image, call ->restore() and ->complete() for all devices.
316 *
317 * RECOVER Creation of a hibernation image or restoration of the main
318 * memory contents from a hibernation image has failed, call
319 * ->thaw() and ->complete() for all devices.
320 */
321
322#define PM_EVENT_ON 0x0000
323#define PM_EVENT_FREEZE 0x0001
324#define PM_EVENT_SUSPEND 0x0002
325#define PM_EVENT_HIBERNATE 0x0004
326#define PM_EVENT_QUIESCE 0x0008
327#define PM_EVENT_RESUME 0x0010
328#define PM_EVENT_THAW 0x0020
329#define PM_EVENT_RESTORE 0x0040
330#define PM_EVENT_RECOVER 0x0080
331
332#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
333
334#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
335#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
336#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
337#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
338#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
339#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
340#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
341#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
342#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
343
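Editor's note: legacy drivers that still use the pm_message_t based callbacks can key off the same event codes (see the note on the legacy framework further down); a common pattern, sketched with a hypothetical exleg_* name, is to treat the "sleep" events differently from freeze/quiesce:

#include <linux/device.h>
#include <linux/pm.h>

static int exleg_suspend(struct device *dev, pm_message_t msg)
{
	if (msg.event & PM_EVENT_SLEEP) {
		/* PMSG_SUSPEND or PMSG_HIBERNATE: really power the device down */
	} else {
		/* e.g. PMSG_FREEZE/PMSG_QUIESCE: just quiesce, keep full power */
	}
	return 0;
}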
344/**
345 * Device power management states
346 *
347 * These state labels are used internally by the PM core to indicate the current
348 * status of a device with respect to the PM core operations.
349 *
350 * DPM_ON Device is regarded as operational. Set this way
351 * initially and when ->complete() is about to be called.
352 * Also set when ->prepare() fails.
353 *
354 * DPM_PREPARING Device is going to be prepared for a PM transition. Set
355 * when ->prepare() is about to be called.
356 *
357 * DPM_RESUMING Device is going to be resumed. Set when ->resume(),
358 * ->thaw(), or ->restore() is about to be called.
359 *
360 * DPM_SUSPENDING Device has been prepared for a power transition. Set
361 * when ->prepare() has just succeeded.
362 *
363 * DPM_OFF Device is regarded as inactive. Set immediately after
364 * ->suspend(), ->freeze(), or ->poweroff() has succeeded.
 365 * Also set when ->resume_noirq(), ->thaw_noirq(), or
366 * ->restore_noirq() is about to be called.
367 *
368 * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after
369 * ->suspend_noirq(), ->freeze_noirq(), or
370 * ->poweroff_noirq() has just succeeded.
371 */
372
373enum dpm_state {
374 DPM_INVALID,
375 DPM_ON,
376 DPM_PREPARING,
377 DPM_RESUMING,
378 DPM_SUSPENDING,
379 DPM_OFF,
380 DPM_OFF_IRQ,
381};
382
383struct dev_pm_info {
384 pm_message_t power_state;
385 unsigned can_wakeup:1;
386 unsigned should_wakeup:1;
387 enum dpm_state status; /* Owned by the PM core */
388#ifdef CONFIG_PM_SLEEP
389 struct list_head entry;
390#endif
391};
392
393/*
394 * The PM_EVENT_ messages are also used by drivers implementing the legacy
395 * suspend framework, based on the ->suspend() and ->resume() callbacks common
396 * for suspend and hibernation transitions, according to the rules below.
397 */
398
399/* Necessary, because several drivers use PM_EVENT_PRETHAW */
400#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
401
402/*
123 * One transition is triggered by resume(), after a suspend() call; the 403 * One transition is triggered by resume(), after a suspend() call; the
124 * message is implicit: 404 * message is implicit:
125 * 405 *
@@ -164,35 +444,13 @@ typedef struct pm_message {
164 * or from system low-power states such as standby or suspend-to-RAM. 444 * or from system low-power states such as standby or suspend-to-RAM.
165 */ 445 */
166 446
167#define PM_EVENT_ON 0 447#ifdef CONFIG_PM_SLEEP
168#define PM_EVENT_FREEZE 1 448extern void device_pm_lock(void);
169#define PM_EVENT_SUSPEND 2 449extern void device_power_up(pm_message_t state);
170#define PM_EVENT_HIBERNATE 4 450extern void device_resume(pm_message_t state);
171#define PM_EVENT_PRETHAW 8
172
173#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
174
175#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
176#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
177#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
178#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
179#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
180
181struct dev_pm_info {
182 pm_message_t power_state;
183 unsigned can_wakeup:1;
184 unsigned should_wakeup:1;
185 bool sleeping:1; /* Owned by the PM core */
186#ifdef CONFIG_PM_SLEEP
187 struct list_head entry;
188#endif
189};
190 451
452extern void device_pm_unlock(void);
191extern int device_power_down(pm_message_t state); 453extern int device_power_down(pm_message_t state);
192extern void device_power_up(void);
193extern void device_resume(void);
194
195#ifdef CONFIG_PM_SLEEP
196extern int device_suspend(pm_message_t state); 454extern int device_suspend(pm_message_t state);
197extern int device_prepare_suspend(pm_message_t state); 455extern int device_prepare_suspend(pm_message_t state);
198 456
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index f0d0b2cb8d20..0aae7776185e 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val)
35 dev->power.can_wakeup = dev->power.should_wakeup = !!val; 35 dev->power.can_wakeup = dev->power.should_wakeup = !!val;
36} 36}
37 37
38static inline void device_set_wakeup_capable(struct device *dev, int val)
39{
40 dev->power.can_wakeup = !!val;
41}
42
38static inline int device_can_wakeup(struct device *dev) 43static inline int device_can_wakeup(struct device *dev)
39{ 44{
40 return dev->power.can_wakeup; 45 return dev->power.can_wakeup;
@@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val)
47 52
48static inline int device_may_wakeup(struct device *dev) 53static inline int device_may_wakeup(struct device *dev)
49{ 54{
50 return dev->power.can_wakeup & dev->power.should_wakeup; 55 return dev->power.can_wakeup && dev->power.should_wakeup;
51}
52
53/*
54 * Platform hook to activate device wakeup capability, if that's not already
55 * handled by enable_irq_wake() etc.
56 * Returns zero on success, else negative errno
57 */
58extern int (*platform_enable_wakeup)(struct device *dev, int is_on);
59
60static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
61{
62 if (platform_enable_wakeup)
63 return (*platform_enable_wakeup)(dev, is_on);
64 return 0;
65} 56}
66 57
67#else /* !CONFIG_PM */ 58#else /* !CONFIG_PM */
@@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val)
72 dev->power.can_wakeup = !!val; 63 dev->power.can_wakeup = !!val;
73} 64}
74 65
66static inline void device_set_wakeup_capable(struct device *dev, int val) { }
67
75static inline int device_can_wakeup(struct device *dev) 68static inline int device_can_wakeup(struct device *dev)
76{ 69{
77 return dev->power.can_wakeup; 70 return dev->power.can_wakeup;
@@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev)
80#define device_set_wakeup_enable(dev, val) do {} while (0) 73#define device_set_wakeup_enable(dev, val) do {} while (0)
81#define device_may_wakeup(dev) 0 74#define device_may_wakeup(dev) 0
82 75
83static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
84{
85 return 0;
86}
87
88#endif /* !CONFIG_PM */ 76#endif /* !CONFIG_PM */
89 77
90#endif /* _LINUX_PM_WAKEUP_H */ 78#endif /* _LINUX_PM_WAKEUP_H */
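Editor's note: a minimal sketch of the wakeup helpers after this change. device_set_wakeup_capable() records the hardware capability without touching the user-controlled should_wakeup bit, and device_may_wakeup() now tests both flags logically (&&) instead of bitwise; the exwk_* names are hypothetical:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static void exwk_probe_setup(struct device *dev, int hw_can_wake)
{
	/* capability is a hardware/firmware property ... */
	device_set_wakeup_capable(dev, hw_can_wake);
	/* ... whether to use it remains a policy decision, exposed via the
	 * device's power/wakeup attribute (device_set_wakeup_enable()). */
}

static int exwk_suspend(struct device *dev)
{
	if (device_may_wakeup(dev)) {
		/* arm the wakeup source, e.g. enable_irq_wake() or PME# */
	}
	return 0;
}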
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index a6977423baf7..e8e69159af71 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t;
86 * that implement @begin(), but platforms implementing @begin() should 86 * that implement @begin(), but platforms implementing @begin() should
87 * also provide a @end() which cleans up transitions aborted before 87 * also provide a @end() which cleans up transitions aborted before
88 * @enter(). 88 * @enter().
89 *
90 * @recover: Recover the platform from a suspend failure.
91 * Called by the PM core if the suspending of devices fails.
92 * This callback is optional and should only be implemented by platforms
93 * which require special recovery actions in that situation.
89 */ 94 */
90struct platform_suspend_ops { 95struct platform_suspend_ops {
91 int (*valid)(suspend_state_t state); 96 int (*valid)(suspend_state_t state);
@@ -94,6 +99,7 @@ struct platform_suspend_ops {
94 int (*enter)(suspend_state_t state); 99 int (*enter)(suspend_state_t state);
95 void (*finish)(void); 100 void (*finish)(void);
96 void (*end)(void); 101 void (*end)(void);
102 void (*recover)(void);
97}; 103};
98 104
99#ifdef CONFIG_SUSPEND 105#ifdef CONFIG_SUSPEND
@@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone);
149 * The methods in this structure allow a platform to carry out special 155 * The methods in this structure allow a platform to carry out special
150 * operations required by it during a hibernation transition. 156 * operations required by it during a hibernation transition.
151 * 157 *
152 * All the methods below must be implemented. 158 * All the methods below, except for @recover(), must be implemented.
153 * 159 *
154 * @begin: Tell the platform driver that we're starting hibernation. 160 * @begin: Tell the platform driver that we're starting hibernation.
155 * Called right after shrinking memory and before freezing devices. 161 * Called right after shrinking memory and before freezing devices.
@@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone);
189 * @restore_cleanup: Clean up after a failing image restoration. 195 * @restore_cleanup: Clean up after a failing image restoration.
190 * Called right after the nonboot CPUs have been enabled and before 196 * Called right after the nonboot CPUs have been enabled and before
191 * thawing devices (runs with IRQs on). 197 * thawing devices (runs with IRQs on).
198 *
199 * @recover: Recover the platform from a failure to suspend devices.
200 * Called by the PM core if the suspending of devices during hibernation
201 * fails. This callback is optional and should only be implemented by
202 * platforms which require special recovery actions in that situation.
192 */ 203 */
193struct platform_hibernation_ops { 204struct platform_hibernation_ops {
194 int (*begin)(void); 205 int (*begin)(void);
@@ -200,6 +211,7 @@ struct platform_hibernation_ops {
200 void (*leave)(void); 211 void (*leave)(void);
201 int (*pre_restore)(void); 212 int (*pre_restore)(void);
202 void (*restore_cleanup)(void); 213 void (*restore_cleanup)(void);
214 void (*recover)(void);
203}; 215};
204 216
205#ifdef CONFIG_HIBERNATION 217#ifdef CONFIG_HIBERNATION
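Editor's note: a minimal sketch of a platform providing the new, optional .recover() hook, which the PM core calls (see the kernel/power changes below) when suspending devices fails; the explat_* names and the PM_SUSPEND_MEM-only policy are illustrative:

#include <linux/suspend.h>

static int explat_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static int explat_enter(suspend_state_t state)
{
	/* platform-specific code that actually enters the sleep state */
	return 0;
}

static void explat_recover(void)
{
	/* undo board-level preparations made before device_suspend() failed */
}

static struct platform_suspend_ops explat_ops = {
	.valid	 = explat_valid,
	.enter	 = explat_enter,
	.recover = explat_recover,
};

/* registered as usual with suspend_set_ops(&explat_ops); */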
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 14a656cdc652..f011e0870b52 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -180,6 +180,17 @@ static void platform_restore_cleanup(int platform_mode)
180} 180}
181 181
182/** 182/**
183 * platform_recover - recover the platform from a failure to suspend
184 * devices.
185 */
186
187static void platform_recover(int platform_mode)
188{
189 if (platform_mode && hibernation_ops && hibernation_ops->recover)
190 hibernation_ops->recover();
191}
192
193/**
183 * create_image - freeze devices that need to be frozen with interrupts 194 * create_image - freeze devices that need to be frozen with interrupts
184 * off, create the hibernation image and thaw those devices. Control 195 * off, create the hibernation image and thaw those devices. Control
185 * reappears in this routine after a restore. 196 * reappears in this routine after a restore.
@@ -193,6 +204,7 @@ static int create_image(int platform_mode)
193 if (error) 204 if (error)
194 return error; 205 return error;
195 206
207 device_pm_lock();
196 local_irq_disable(); 208 local_irq_disable();
197 /* At this point, device_suspend() has been called, but *not* 209 /* At this point, device_suspend() has been called, but *not*
198 * device_power_down(). We *must* call device_power_down() now. 210 * device_power_down(). We *must* call device_power_down() now.
@@ -224,9 +236,11 @@ static int create_image(int platform_mode)
224 /* NOTE: device_power_up() is just a resume() for devices 236 /* NOTE: device_power_up() is just a resume() for devices
225 * that suspended with irqs off ... no overall powerup. 237 * that suspended with irqs off ... no overall powerup.
226 */ 238 */
227 device_power_up(); 239 device_power_up(in_suspend ?
240 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
228 Enable_irqs: 241 Enable_irqs:
229 local_irq_enable(); 242 local_irq_enable();
243 device_pm_unlock();
230 return error; 244 return error;
231} 245}
232 246
@@ -255,10 +269,10 @@ int hibernation_snapshot(int platform_mode)
255 suspend_console(); 269 suspend_console();
256 error = device_suspend(PMSG_FREEZE); 270 error = device_suspend(PMSG_FREEZE);
257 if (error) 271 if (error)
258 goto Resume_console; 272 goto Recover_platform;
259 273
260 if (hibernation_test(TEST_DEVICES)) 274 if (hibernation_test(TEST_DEVICES))
261 goto Resume_devices; 275 goto Recover_platform;
262 276
263 error = platform_pre_snapshot(platform_mode); 277 error = platform_pre_snapshot(platform_mode);
264 if (error || hibernation_test(TEST_PLATFORM)) 278 if (error || hibernation_test(TEST_PLATFORM))
@@ -280,12 +294,16 @@ int hibernation_snapshot(int platform_mode)
280 Finish: 294 Finish:
281 platform_finish(platform_mode); 295 platform_finish(platform_mode);
282 Resume_devices: 296 Resume_devices:
283 device_resume(); 297 device_resume(in_suspend ?
284 Resume_console: 298 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
285 resume_console(); 299 resume_console();
286 Close: 300 Close:
287 platform_end(platform_mode); 301 platform_end(platform_mode);
288 return error; 302 return error;
303
304 Recover_platform:
305 platform_recover(platform_mode);
306 goto Resume_devices;
289} 307}
290 308
291/** 309/**
@@ -300,8 +318,9 @@ static int resume_target_kernel(void)
300{ 318{
301 int error; 319 int error;
302 320
321 device_pm_lock();
303 local_irq_disable(); 322 local_irq_disable();
304 error = device_power_down(PMSG_PRETHAW); 323 error = device_power_down(PMSG_QUIESCE);
305 if (error) { 324 if (error) {
306 printk(KERN_ERR "PM: Some devices failed to power down, " 325 printk(KERN_ERR "PM: Some devices failed to power down, "
307 "aborting resume\n"); 326 "aborting resume\n");
@@ -329,9 +348,10 @@ static int resume_target_kernel(void)
329 swsusp_free(); 348 swsusp_free();
330 restore_processor_state(); 349 restore_processor_state();
331 touch_softlockup_watchdog(); 350 touch_softlockup_watchdog();
332 device_power_up(); 351 device_power_up(PMSG_RECOVER);
333 Enable_irqs: 352 Enable_irqs:
334 local_irq_enable(); 353 local_irq_enable();
354 device_pm_unlock();
335 return error; 355 return error;
336} 356}
337 357
@@ -350,7 +370,7 @@ int hibernation_restore(int platform_mode)
350 370
351 pm_prepare_console(); 371 pm_prepare_console();
352 suspend_console(); 372 suspend_console();
353 error = device_suspend(PMSG_PRETHAW); 373 error = device_suspend(PMSG_QUIESCE);
354 if (error) 374 if (error)
355 goto Finish; 375 goto Finish;
356 376
@@ -362,7 +382,7 @@ int hibernation_restore(int platform_mode)
362 enable_nonboot_cpus(); 382 enable_nonboot_cpus();
363 } 383 }
364 platform_restore_cleanup(platform_mode); 384 platform_restore_cleanup(platform_mode);
365 device_resume(); 385 device_resume(PMSG_RECOVER);
366 Finish: 386 Finish:
367 resume_console(); 387 resume_console();
368 pm_restore_console(); 388 pm_restore_console();
@@ -392,8 +412,11 @@ int hibernation_platform_enter(void)
392 412
393 suspend_console(); 413 suspend_console();
394 error = device_suspend(PMSG_HIBERNATE); 414 error = device_suspend(PMSG_HIBERNATE);
395 if (error) 415 if (error) {
396 goto Resume_console; 416 if (hibernation_ops->recover)
417 hibernation_ops->recover();
418 goto Resume_devices;
419 }
397 420
398 error = hibernation_ops->prepare(); 421 error = hibernation_ops->prepare();
399 if (error) 422 if (error)
@@ -403,6 +426,7 @@ int hibernation_platform_enter(void)
403 if (error) 426 if (error)
404 goto Finish; 427 goto Finish;
405 428
429 device_pm_lock();
406 local_irq_disable(); 430 local_irq_disable();
407 error = device_power_down(PMSG_HIBERNATE); 431 error = device_power_down(PMSG_HIBERNATE);
408 if (!error) { 432 if (!error) {
@@ -411,6 +435,7 @@ int hibernation_platform_enter(void)
411 while (1); 435 while (1);
412 } 436 }
413 local_irq_enable(); 437 local_irq_enable();
438 device_pm_unlock();
414 439
415 /* 440 /*
416 * We don't need to reenable the nonboot CPUs or resume consoles, since 441 * We don't need to reenable the nonboot CPUs or resume consoles, since
@@ -419,8 +444,7 @@ int hibernation_platform_enter(void)
419 Finish: 444 Finish:
420 hibernation_ops->finish(); 445 hibernation_ops->finish();
421 Resume_devices: 446 Resume_devices:
422 device_resume(); 447 device_resume(PMSG_RESTORE);
423 Resume_console:
424 resume_console(); 448 resume_console();
425 Close: 449 Close:
426 hibernation_ops->end(); 450 hibernation_ops->end();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6a6d5eb3524e..3398f4651aa1 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -228,6 +228,7 @@ static int suspend_enter(suspend_state_t state)
228{ 228{
229 int error = 0; 229 int error = 0;
230 230
231 device_pm_lock();
231 arch_suspend_disable_irqs(); 232 arch_suspend_disable_irqs();
232 BUG_ON(!irqs_disabled()); 233 BUG_ON(!irqs_disabled());
233 234
@@ -239,10 +240,11 @@ static int suspend_enter(suspend_state_t state)
239 if (!suspend_test(TEST_CORE)) 240 if (!suspend_test(TEST_CORE))
240 error = suspend_ops->enter(state); 241 error = suspend_ops->enter(state);
241 242
242 device_power_up(); 243 device_power_up(PMSG_RESUME);
243 Done: 244 Done:
244 arch_suspend_enable_irqs(); 245 arch_suspend_enable_irqs();
245 BUG_ON(irqs_disabled()); 246 BUG_ON(irqs_disabled());
247 device_pm_unlock();
246 return error; 248 return error;
247} 249}
248 250
@@ -267,11 +269,11 @@ int suspend_devices_and_enter(suspend_state_t state)
267 error = device_suspend(PMSG_SUSPEND); 269 error = device_suspend(PMSG_SUSPEND);
268 if (error) { 270 if (error) {
269 printk(KERN_ERR "PM: Some devices failed to suspend\n"); 271 printk(KERN_ERR "PM: Some devices failed to suspend\n");
270 goto Resume_console; 272 goto Recover_platform;
271 } 273 }
272 274
273 if (suspend_test(TEST_DEVICES)) 275 if (suspend_test(TEST_DEVICES))
274 goto Resume_devices; 276 goto Recover_platform;
275 277
276 if (suspend_ops->prepare) { 278 if (suspend_ops->prepare) {
277 error = suspend_ops->prepare(); 279 error = suspend_ops->prepare();
@@ -291,13 +293,17 @@ int suspend_devices_and_enter(suspend_state_t state)
291 if (suspend_ops->finish) 293 if (suspend_ops->finish)
292 suspend_ops->finish(); 294 suspend_ops->finish();
293 Resume_devices: 295 Resume_devices:
294 device_resume(); 296 device_resume(PMSG_RESUME);
295 Resume_console:
296 resume_console(); 297 resume_console();
297 Close: 298 Close:
298 if (suspend_ops->end) 299 if (suspend_ops->end)
299 suspend_ops->end(); 300 suspend_ops->end();
300 return error; 301 return error;
302
303 Recover_platform:
304 if (suspend_ops->recover)
305 suspend_ops->recover();
306 goto Resume_devices;
301} 307}
302 308
303/** 309/**
diff --git a/lib/kobject.c b/lib/kobject.c
index 718e5101c263..dcade0543bd2 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -439,6 +439,7 @@ out:
439 439
440 return error; 440 return error;
441} 441}
442EXPORT_SYMBOL_GPL(kobject_rename);
442 443
443/** 444/**
444 * kobject_move - move object to another parent 445 * kobject_move - move object to another parent