diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-16 10:49:54 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-16 10:49:54 -0400 |
commit | 4406c56d0a4da7a37b9180abeaece6cd00bcc874 (patch) | |
tree | 65a85fa73a25d24cbed6d163fdcf8df1b934a0be /drivers | |
parent | 6b7b352f2102e21f9d8f38e932f01d9c5705c073 (diff) | |
parent | 5e3573db2bd5db6925159279d99576a4635bdb66 (diff) |
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (75 commits)
PCI hotplug: clean up acpi_run_hpp()
PCI hotplug: acpiphp: use generic pci_configure_slot()
PCI hotplug: shpchp: use generic pci_configure_slot()
PCI hotplug: pciehp: use generic pci_configure_slot()
PCI hotplug: add pci_configure_slot()
PCI hotplug: clean up acpi_get_hp_params_from_firmware() interface
PCI hotplug: acpiphp: don't cache hotplug_params in acpiphp_bridge
PCI hotplug: acpiphp: remove superfluous _HPP/_HPX evaluation
PCI: Clear saved_state after the state has been restored
PCI PM: Return error codes from pci_pm_resume()
PCI: use dev_printk in quirk messages
PCI / PCIe portdrv: Fix pcie_portdrv_slot_reset()
PCI Hotplug: convert acpi_pci_detect_ejectable() to take an acpi_handle
PCI Hotplug: acpiphp: find bridges the easy way
PCI: pcie portdrv: remove unused variable
PCI / ACPI PM: Propagate wake-up enable for devices w/o ACPI support
ACPI PM: Replace wakeup.prepared with reference counter
PCI PM: Introduce device flag wakeup_prepared
PCI / ACPI PM: Rework some debug messages
PCI PM: Simplify PCI wake-up code
...
Fixed up conflict in arch/powerpc/kernel/pci_64.c due to OF device tree
scanning having been moved and merged for the 32- and 64-bit cases. The
'needs_freset' initialization added in 6e19314cc ("PCI/powerpc: support
PCIe fundamental reset") is now in arch/powerpc/kernel/pci_of_scan.c.
Diffstat (limited to 'drivers')
46 files changed, 2497 insertions, 1243 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 55b5b90c2a44..31b961c2f22f 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -61,20 +61,6 @@ static struct acpi_driver acpi_pci_root_driver = { | |||
61 | }, | 61 | }, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct acpi_pci_root { | ||
65 | struct list_head node; | ||
66 | struct acpi_device *device; | ||
67 | struct pci_bus *bus; | ||
68 | u16 segment; | ||
69 | u8 bus_nr; | ||
70 | |||
71 | u32 osc_support_set; /* _OSC state of support bits */ | ||
72 | u32 osc_control_set; /* _OSC state of control bits */ | ||
73 | u32 osc_control_qry; /* the latest _OSC query result */ | ||
74 | |||
75 | u32 osc_queried:1; /* has _OSC control been queried? */ | ||
76 | }; | ||
77 | |||
78 | static LIST_HEAD(acpi_pci_roots); | 64 | static LIST_HEAD(acpi_pci_roots); |
79 | 65 | ||
80 | static struct acpi_pci_driver *sub_driver; | 66 | static struct acpi_pci_driver *sub_driver; |
@@ -317,7 +303,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) | |||
317 | return status; | 303 | return status; |
318 | } | 304 | } |
319 | 305 | ||
320 | static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) | 306 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) |
321 | { | 307 | { |
322 | struct acpi_pci_root *root; | 308 | struct acpi_pci_root *root; |
323 | 309 | ||
@@ -327,6 +313,7 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) | |||
327 | } | 313 | } |
328 | return NULL; | 314 | return NULL; |
329 | } | 315 | } |
316 | EXPORT_SYMBOL_GPL(acpi_pci_find_root); | ||
330 | 317 | ||
331 | struct acpi_handle_node { | 318 | struct acpi_handle_node { |
332 | struct list_head node; | 319 | struct list_head node; |
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index d74365d4a6e7..5a09bf392ec1 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -44,6 +44,8 @@ | |||
44 | #include <acpi/acpi_bus.h> | 44 | #include <acpi/acpi_bus.h> |
45 | #include <acpi/acpi_drivers.h> | 45 | #include <acpi/acpi_drivers.h> |
46 | 46 | ||
47 | #include "sleep.h" | ||
48 | |||
47 | #define _COMPONENT ACPI_POWER_COMPONENT | 49 | #define _COMPONENT ACPI_POWER_COMPONENT |
48 | ACPI_MODULE_NAME("power"); | 50 | ACPI_MODULE_NAME("power"); |
49 | #define ACPI_POWER_CLASS "power_resource" | 51 | #define ACPI_POWER_CLASS "power_resource" |
@@ -361,17 +363,15 @@ int acpi_device_sleep_wake(struct acpi_device *dev, | |||
361 | */ | 363 | */ |
362 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) | 364 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) |
363 | { | 365 | { |
364 | int i, err; | 366 | int i, err = 0; |
365 | 367 | ||
366 | if (!dev || !dev->wakeup.flags.valid) | 368 | if (!dev || !dev->wakeup.flags.valid) |
367 | return -EINVAL; | 369 | return -EINVAL; |
368 | 370 | ||
369 | /* | 371 | mutex_lock(&acpi_device_lock); |
370 | * Do not execute the code below twice in a row without calling | 372 | |
371 | * acpi_disable_wakeup_device_power() in between for the same device | 373 | if (dev->wakeup.prepare_count++) |
372 | */ | 374 | goto out; |
373 | if (dev->wakeup.flags.prepared) | ||
374 | return 0; | ||
375 | 375 | ||
376 | /* Open power resource */ | 376 | /* Open power resource */ |
377 | for (i = 0; i < dev->wakeup.resources.count; i++) { | 377 | for (i = 0; i < dev->wakeup.resources.count; i++) { |
@@ -379,7 +379,8 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) | |||
379 | if (ret) { | 379 | if (ret) { |
380 | printk(KERN_ERR PREFIX "Transition power state\n"); | 380 | printk(KERN_ERR PREFIX "Transition power state\n"); |
381 | dev->wakeup.flags.valid = 0; | 381 | dev->wakeup.flags.valid = 0; |
382 | return -ENODEV; | 382 | err = -ENODEV; |
383 | goto err_out; | ||
383 | } | 384 | } |
384 | } | 385 | } |
385 | 386 | ||
@@ -388,9 +389,13 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) | |||
388 | * in arbitrary power state afterwards. | 389 | * in arbitrary power state afterwards. |
389 | */ | 390 | */ |
390 | err = acpi_device_sleep_wake(dev, 1, sleep_state, 3); | 391 | err = acpi_device_sleep_wake(dev, 1, sleep_state, 3); |
391 | if (!err) | ||
392 | dev->wakeup.flags.prepared = 1; | ||
393 | 392 | ||
393 | err_out: | ||
394 | if (err) | ||
395 | dev->wakeup.prepare_count = 0; | ||
396 | |||
397 | out: | ||
398 | mutex_unlock(&acpi_device_lock); | ||
394 | return err; | 399 | return err; |
395 | } | 400 | } |
396 | 401 | ||
@@ -402,35 +407,42 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) | |||
402 | */ | 407 | */ |
403 | int acpi_disable_wakeup_device_power(struct acpi_device *dev) | 408 | int acpi_disable_wakeup_device_power(struct acpi_device *dev) |
404 | { | 409 | { |
405 | int i, ret; | 410 | int i, err = 0; |
406 | 411 | ||
407 | if (!dev || !dev->wakeup.flags.valid) | 412 | if (!dev || !dev->wakeup.flags.valid) |
408 | return -EINVAL; | 413 | return -EINVAL; |
409 | 414 | ||
415 | mutex_lock(&acpi_device_lock); | ||
416 | |||
417 | if (--dev->wakeup.prepare_count > 0) | ||
418 | goto out; | ||
419 | |||
410 | /* | 420 | /* |
411 | * Do not execute the code below twice in a row without calling | 421 | * Executing the code below even if prepare_count is already zero when |
412 | * acpi_enable_wakeup_device_power() in between for the same device | 422 | * the function is called may be useful, for example for initialisation. |
413 | */ | 423 | */ |
414 | if (!dev->wakeup.flags.prepared) | 424 | if (dev->wakeup.prepare_count < 0) |
415 | return 0; | 425 | dev->wakeup.prepare_count = 0; |
416 | 426 | ||
417 | dev->wakeup.flags.prepared = 0; | 427 | err = acpi_device_sleep_wake(dev, 0, 0, 0); |
418 | 428 | if (err) | |
419 | ret = acpi_device_sleep_wake(dev, 0, 0, 0); | 429 | goto out; |
420 | if (ret) | ||
421 | return ret; | ||
422 | 430 | ||
423 | /* Close power resource */ | 431 | /* Close power resource */ |
424 | for (i = 0; i < dev->wakeup.resources.count; i++) { | 432 | for (i = 0; i < dev->wakeup.resources.count; i++) { |
425 | ret = acpi_power_off_device(dev->wakeup.resources.handles[i], dev); | 433 | int ret = acpi_power_off_device( |
434 | dev->wakeup.resources.handles[i], dev); | ||
426 | if (ret) { | 435 | if (ret) { |
427 | printk(KERN_ERR PREFIX "Transition power state\n"); | 436 | printk(KERN_ERR PREFIX "Transition power state\n"); |
428 | dev->wakeup.flags.valid = 0; | 437 | dev->wakeup.flags.valid = 0; |
429 | return -ENODEV; | 438 | err = -ENODEV; |
439 | goto out; | ||
430 | } | 440 | } |
431 | } | 441 | } |
432 | 442 | ||
433 | return ret; | 443 | out: |
444 | mutex_unlock(&acpi_device_lock); | ||
445 | return err; | ||
434 | } | 446 | } |
435 | 447 | ||
436 | /* -------------------------------------------------------------------------- | 448 | /* -------------------------------------------------------------------------- |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 781435d7e369..318b1ea7a5bf 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -781,6 +781,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
781 | kfree(buffer.pointer); | 781 | kfree(buffer.pointer); |
782 | 782 | ||
783 | device->wakeup.flags.valid = 1; | 783 | device->wakeup.flags.valid = 1; |
784 | device->wakeup.prepare_count = 0; | ||
784 | /* Call _PSW/_DSW object to disable its ability to wake the sleeping | 785 | /* Call _PSW/_DSW object to disable its ability to wake the sleeping |
785 | * system for the ACPI device with the _PRW object. | 786 | * system for the ACPI device with the _PRW object. |
786 | * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW. | 787 |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 42159a28f433..feece693d773 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -689,19 +689,25 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | |||
689 | { | 689 | { |
690 | acpi_handle handle; | 690 | acpi_handle handle; |
691 | struct acpi_device *adev; | 691 | struct acpi_device *adev; |
692 | int error; | ||
692 | 693 | ||
693 | if (!device_may_wakeup(dev)) | 694 | if (!device_can_wakeup(dev)) |
694 | return -EINVAL; | 695 | return -EINVAL; |
695 | 696 | ||
696 | handle = DEVICE_ACPI_HANDLE(dev); | 697 | handle = DEVICE_ACPI_HANDLE(dev); |
697 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | 698 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { |
698 | printk(KERN_DEBUG "ACPI handle has no context!\n"); | 699 | dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__); |
699 | return -ENODEV; | 700 | return -ENODEV; |
700 | } | 701 | } |
701 | 702 | ||
702 | return enable ? | 703 | error = enable ? |
703 | acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : | 704 | acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : |
704 | acpi_disable_wakeup_device_power(adev); | 705 | acpi_disable_wakeup_device_power(adev); |
706 | if (!error) | ||
707 | dev_info(dev, "wake-up capability %s by ACPI\n", | ||
708 | enable ? "enabled" : "disabled"); | ||
709 | |||
710 | return error; | ||
705 | } | 711 | } |
706 | #endif | 712 | #endif |
707 | 713 | ||
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index 88725dcdf8bc..e0ee0c036f5a 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -68,7 +68,7 @@ void acpi_enable_wakeup_device(u8 sleep_state) | |||
68 | /* If users want to disable run-wake GPE, | 68 | /* If users want to disable run-wake GPE, |
69 | * we only disable it for wake and leave it for runtime | 69 | * we only disable it for wake and leave it for runtime |
70 | */ | 70 | */ |
71 | if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) | 71 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) |
72 | || sleep_state > (u32) dev->wakeup.sleep_state) { | 72 | || sleep_state > (u32) dev->wakeup.sleep_state) { |
73 | if (dev->wakeup.flags.run_wake) { | 73 | if (dev->wakeup.flags.run_wake) { |
74 | /* set_gpe_type will disable GPE, leave it like that */ | 74 | /* set_gpe_type will disable GPE, leave it like that */ |
@@ -100,7 +100,7 @@ void acpi_disable_wakeup_device(u8 sleep_state) | |||
100 | if (!dev->wakeup.flags.valid) | 100 | if (!dev->wakeup.flags.valid) |
101 | continue; | 101 | continue; |
102 | 102 | ||
103 | if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) | 103 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) |
104 | || sleep_state > (u32) dev->wakeup.sleep_state) { | 104 | || sleep_state > (u32) dev->wakeup.sleep_state) { |
105 | if (dev->wakeup.flags.run_wake) { | 105 | if (dev->wakeup.flags.run_wake) { |
106 | acpi_set_gpe_type(dev->wakeup.gpe_device, | 106 | acpi_set_gpe_type(dev->wakeup.gpe_device, |
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index de566cf0414c..30879df3daea 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile | |||
@@ -1 +1 @@ | |||
obj-y += drm/ | obj-y += drm/ vga/ | ||
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig new file mode 100644 index 000000000000..790e675b13eb --- /dev/null +++ b/drivers/gpu/vga/Kconfig | |||
@@ -0,0 +1,10 @@ | |||
1 | config VGA_ARB | ||
2 | bool "VGA Arbitration" if EMBEDDED | ||
3 | default y | ||
4 | depends on PCI | ||
5 | help | ||
6 | Some "legacy" VGA devices implemented on PCI typically have the same | ||
7 | hard-decoded addresses as they did on ISA. When multiple PCI devices | ||
8 | are accessed at same time they need some kind of coordination. Please | ||
9 | see Documentation/vgaarbiter.txt for more details. Select this to | ||
10 | enable VGA arbiter. | ||
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile new file mode 100644 index 000000000000..7cc8c1ed645b --- /dev/null +++ b/drivers/gpu/vga/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_VGA_ARB) += vgaarb.o | |||
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c new file mode 100644 index 000000000000..1ac0c93603c9 --- /dev/null +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -0,0 +1,1205 @@ | |||
1 | /* | ||
2 | * vgaarb.c | ||
3 | * | ||
4 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> | ||
5 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> | ||
6 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> | ||
7 | * | ||
8 | * Implements the VGA arbitration. For details refer to | ||
9 | * Documentation/vgaarbiter.txt | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/wait.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/poll.h> | ||
22 | #include <linux/miscdevice.h> | ||
23 | |||
24 | #include <linux/uaccess.h> | ||
25 | |||
26 | #include <linux/vgaarb.h> | ||
27 | |||
28 | static void vga_arbiter_notify_clients(void); | ||
29 | /* | ||
30 | * We keep a list of all vga devices in the system to speed | ||
31 | * up the various operations of the arbiter | ||
32 | */ | ||
33 | struct vga_device { | ||
34 | struct list_head list; | ||
35 | struct pci_dev *pdev; | ||
36 | unsigned int decodes; /* what it decodes */ | ||
37 | unsigned int owns; /* what it owns */ | ||
38 | unsigned int locks; /* what it locks */ | ||
39 | unsigned int io_lock_cnt; /* legacy IO lock count */ | ||
40 | unsigned int mem_lock_cnt; /* legacy MEM lock count */ | ||
41 | unsigned int io_norm_cnt; /* normal IO count */ | ||
42 | unsigned int mem_norm_cnt; /* normal MEM count */ | ||
43 | |||
44 | /* allow IRQ enable/disable hook */ | ||
45 | void *cookie; | ||
46 | void (*irq_set_state)(void *cookie, bool enable); | ||
47 | unsigned int (*set_vga_decode)(void *cookie, bool decode); | ||
48 | }; | ||
49 | |||
50 | static LIST_HEAD(vga_list); | ||
51 | static int vga_count, vga_decode_count; | ||
52 | static bool vga_arbiter_used; | ||
53 | static DEFINE_SPINLOCK(vga_lock); | ||
54 | static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue); | ||
55 | |||
56 | |||
57 | static const char *vga_iostate_to_str(unsigned int iostate) | ||
58 | { | ||
59 | /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */ | ||
60 | iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; | ||
61 | switch (iostate) { | ||
62 | case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM: | ||
63 | return "io+mem"; | ||
64 | case VGA_RSRC_LEGACY_IO: | ||
65 | return "io"; | ||
66 | case VGA_RSRC_LEGACY_MEM: | ||
67 | return "mem"; | ||
68 | } | ||
69 | return "none"; | ||
70 | } | ||
71 | |||
72 | static int vga_str_to_iostate(char *buf, int str_size, int *io_state) | ||
73 | { | ||
74 | /* we could in theory hand out locks on IO and mem | ||
75 | * separately to userspace but it can cause deadlocks */ | ||
76 | if (strncmp(buf, "none", 4) == 0) { | ||
77 | *io_state = VGA_RSRC_NONE; | ||
78 | return 1; | ||
79 | } | ||
80 | |||
81 | /* XXX We're not checking the str_size! */ | ||
82 | if (strncmp(buf, "io+mem", 6) == 0) | ||
83 | goto both; | ||
84 | else if (strncmp(buf, "io", 2) == 0) | ||
85 | goto both; | ||
86 | else if (strncmp(buf, "mem", 3) == 0) | ||
87 | goto both; | ||
88 | return 0; | ||
89 | both: | ||
90 | *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; | ||
91 | return 1; | ||
92 | } | ||
93 | |||
94 | #ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE | ||
95 | /* this is only used as a cookie - it should not be dereferenced */ | ||
96 | static struct pci_dev *vga_default; | ||
97 | #endif | ||
98 | |||
99 | static void vga_arb_device_card_gone(struct pci_dev *pdev); | ||
100 | |||
101 | /* Find somebody in our list */ | ||
102 | static struct vga_device *vgadev_find(struct pci_dev *pdev) | ||
103 | { | ||
104 | struct vga_device *vgadev; | ||
105 | |||
106 | list_for_each_entry(vgadev, &vga_list, list) | ||
107 | if (pdev == vgadev->pdev) | ||
108 | return vgadev; | ||
109 | return NULL; | ||
110 | } | ||
111 | |||
112 | /* Returns the default VGA device (vgacon's babe) */ | ||
113 | #ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE | ||
114 | struct pci_dev *vga_default_device(void) | ||
115 | { | ||
116 | return vga_default; | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) | ||
121 | { | ||
122 | if (vgadev->irq_set_state) | ||
123 | vgadev->irq_set_state(vgadev->cookie, state); | ||
124 | } | ||
125 | |||
126 | |||
127 | /* If we don't ever use VGA arb we should avoid | ||
128 | turning off anything anywhere due to old X servers getting | ||
129 | confused about the boot device not being VGA */ | ||
130 | static void vga_check_first_use(void) | ||
131 | { | ||
132 | /* we should inform all GPUs in the system that | ||
133 | * VGA arb has occurred and to try and disable resources | ||
134 | * if they can */ | ||
135 | if (!vga_arbiter_used) { | ||
136 | vga_arbiter_used = true; | ||
137 | vga_arbiter_notify_clients(); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | static struct vga_device *__vga_tryget(struct vga_device *vgadev, | ||
142 | unsigned int rsrc) | ||
143 | { | ||
144 | unsigned int wants, legacy_wants, match; | ||
145 | struct vga_device *conflict; | ||
146 | unsigned int pci_bits; | ||
147 | /* Account for "normal" resources to lock. If we decode the legacy, | ||
148 | * counterpart, we need to request it as well | ||
149 | */ | ||
150 | if ((rsrc & VGA_RSRC_NORMAL_IO) && | ||
151 | (vgadev->decodes & VGA_RSRC_LEGACY_IO)) | ||
152 | rsrc |= VGA_RSRC_LEGACY_IO; | ||
153 | if ((rsrc & VGA_RSRC_NORMAL_MEM) && | ||
154 | (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) | ||
155 | rsrc |= VGA_RSRC_LEGACY_MEM; | ||
156 | |||
157 | pr_devel("%s: %d\n", __func__, rsrc); | ||
158 | pr_devel("%s: owns: %d\n", __func__, vgadev->owns); | ||
159 | |||
160 | /* Check what resources we need to acquire */ | ||
161 | wants = rsrc & ~vgadev->owns; | ||
162 | |||
163 | /* We already own everything, just mark locked & bye bye */ | ||
164 | if (wants == 0) | ||
165 | goto lock_them; | ||
166 | |||
167 | /* We don't need to request a legacy resource, we just enable | ||
168 | * appropriate decoding and go | ||
169 | */ | ||
170 | legacy_wants = wants & VGA_RSRC_LEGACY_MASK; | ||
171 | if (legacy_wants == 0) | ||
172 | goto enable_them; | ||
173 | |||
174 | /* Ok, we don't, let's find out how we need to kick off */ | ||
175 | list_for_each_entry(conflict, &vga_list, list) { | ||
176 | unsigned int lwants = legacy_wants; | ||
177 | unsigned int change_bridge = 0; | ||
178 | |||
179 | /* Don't conflict with myself */ | ||
180 | if (vgadev == conflict) | ||
181 | continue; | ||
182 | |||
183 | /* Check if the architecture allows a conflict between those | ||
184 | * 2 devices or if they are on separate domains | ||
185 | */ | ||
186 | if (!vga_conflicts(vgadev->pdev, conflict->pdev)) | ||
187 | continue; | ||
188 | |||
189 | /* We have a possible conflict. before we go further, we must | ||
190 | * check if we sit on the same bus as the conflicting device. | ||
191 | * if we don't, then we must tie both IO and MEM resources | ||
192 | * together since there is only a single bit controlling | ||
193 | * VGA forwarding on P2P bridges | ||
194 | */ | ||
195 | if (vgadev->pdev->bus != conflict->pdev->bus) { | ||
196 | change_bridge = 1; | ||
197 | lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; | ||
198 | } | ||
199 | |||
200 | /* Check if the guy has a lock on the resource. If he does, | ||
201 | * return the conflicting entry | ||
202 | */ | ||
203 | if (conflict->locks & lwants) | ||
204 | return conflict; | ||
205 | |||
206 | /* Ok, now check if he owns the resource we want. We don't need | ||
207 | * to check "decodes" since it should be impossible to own | ||
208 | * legacy resources you don't decode unless I have a bug | ||
209 | * in this code... | ||
210 | */ | ||
211 | WARN_ON(conflict->owns & ~conflict->decodes); | ||
212 | match = lwants & conflict->owns; | ||
213 | if (!match) | ||
214 | continue; | ||
215 | |||
216 | /* looks like he doesn't have a lock, we can steal | ||
217 | * them from him | ||
218 | */ | ||
219 | vga_irq_set_state(conflict, false); | ||
220 | |||
221 | pci_bits = 0; | ||
222 | if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) | ||
223 | pci_bits |= PCI_COMMAND_MEMORY; | ||
224 | if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) | ||
225 | pci_bits |= PCI_COMMAND_IO; | ||
226 | |||
227 | pci_set_vga_state(conflict->pdev, false, pci_bits, | ||
228 | change_bridge); | ||
229 | conflict->owns &= ~lwants; | ||
230 | /* If he also owned non-legacy, that is no longer the case */ | ||
231 | if (lwants & VGA_RSRC_LEGACY_MEM) | ||
232 | conflict->owns &= ~VGA_RSRC_NORMAL_MEM; | ||
233 | if (lwants & VGA_RSRC_LEGACY_IO) | ||
234 | conflict->owns &= ~VGA_RSRC_NORMAL_IO; | ||
235 | } | ||
236 | |||
237 | enable_them: | ||
238 | /* ok dude, we got it, everybody conflicting has been disabled, let's | ||
239 | * enable us. Make sure we don't mark a bit in "owns" that we don't | ||
240 | * also have in "decodes". We can lock resources we don't decode but | ||
241 | * not own them. | ||
242 | */ | ||
243 | pci_bits = 0; | ||
244 | if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) | ||
245 | pci_bits |= PCI_COMMAND_MEMORY; | ||
246 | if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) | ||
247 | pci_bits |= PCI_COMMAND_IO; | ||
248 | pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK)); | ||
249 | |||
250 | vga_irq_set_state(vgadev, true); | ||
251 | vgadev->owns |= (wants & vgadev->decodes); | ||
252 | lock_them: | ||
253 | vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); | ||
254 | if (rsrc & VGA_RSRC_LEGACY_IO) | ||
255 | vgadev->io_lock_cnt++; | ||
256 | if (rsrc & VGA_RSRC_LEGACY_MEM) | ||
257 | vgadev->mem_lock_cnt++; | ||
258 | if (rsrc & VGA_RSRC_NORMAL_IO) | ||
259 | vgadev->io_norm_cnt++; | ||
260 | if (rsrc & VGA_RSRC_NORMAL_MEM) | ||
261 | vgadev->mem_norm_cnt++; | ||
262 | |||
263 | return NULL; | ||
264 | } | ||
265 | |||
266 | static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) | ||
267 | { | ||
268 | unsigned int old_locks = vgadev->locks; | ||
269 | |||
270 | pr_devel("%s\n", __func__); | ||
271 | |||
272 | /* Update our counters, and account for equivalent legacy resources | ||
273 | * if we decode them | ||
274 | */ | ||
275 | if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) { | ||
276 | vgadev->io_norm_cnt--; | ||
277 | if (vgadev->decodes & VGA_RSRC_LEGACY_IO) | ||
278 | rsrc |= VGA_RSRC_LEGACY_IO; | ||
279 | } | ||
280 | if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) { | ||
281 | vgadev->mem_norm_cnt--; | ||
282 | if (vgadev->decodes & VGA_RSRC_LEGACY_MEM) | ||
283 | rsrc |= VGA_RSRC_LEGACY_MEM; | ||
284 | } | ||
285 | if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0) | ||
286 | vgadev->io_lock_cnt--; | ||
287 | if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0) | ||
288 | vgadev->mem_lock_cnt--; | ||
289 | |||
290 | /* Just clear lock bits, we do lazy operations so we don't really | ||
291 | * have to bother about anything else at this point | ||
292 | */ | ||
293 | if (vgadev->io_lock_cnt == 0) | ||
294 | vgadev->locks &= ~VGA_RSRC_LEGACY_IO; | ||
295 | if (vgadev->mem_lock_cnt == 0) | ||
296 | vgadev->locks &= ~VGA_RSRC_LEGACY_MEM; | ||
297 | |||
298 | /* Kick the wait queue in case somebody was waiting if we actually | ||
299 | * released something | ||
300 | */ | ||
301 | if (old_locks != vgadev->locks) | ||
302 | wake_up_all(&vga_wait_queue); | ||
303 | } | ||
304 | |||
305 | int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) | ||
306 | { | ||
307 | struct vga_device *vgadev, *conflict; | ||
308 | unsigned long flags; | ||
309 | wait_queue_t wait; | ||
310 | int rc = 0; | ||
311 | |||
312 | vga_check_first_use(); | ||
313 | /* The one who calls us should check for this, but lets be sure... */ | ||
314 | if (pdev == NULL) | ||
315 | pdev = vga_default_device(); | ||
316 | if (pdev == NULL) | ||
317 | return 0; | ||
318 | |||
319 | for (;;) { | ||
320 | spin_lock_irqsave(&vga_lock, flags); | ||
321 | vgadev = vgadev_find(pdev); | ||
322 | if (vgadev == NULL) { | ||
323 | spin_unlock_irqrestore(&vga_lock, flags); | ||
324 | rc = -ENODEV; | ||
325 | break; | ||
326 | } | ||
327 | conflict = __vga_tryget(vgadev, rsrc); | ||
328 | spin_unlock_irqrestore(&vga_lock, flags); | ||
329 | if (conflict == NULL) | ||
330 | break; | ||
331 | |||
332 | |||
333 | /* We have a conflict, we wait until somebody kicks the | ||
334 | * work queue. Currently we have one work queue that we | ||
335 | * kick each time some resources are released, but it would | ||
336 | * be fairly easy to have a per device one so that we only | ||
337 | * need to attach to the conflicting device | ||
338 | */ | ||
339 | init_waitqueue_entry(&wait, current); | ||
340 | add_wait_queue(&vga_wait_queue, &wait); | ||
341 | set_current_state(interruptible ? | ||
342 | TASK_INTERRUPTIBLE : | ||
343 | TASK_UNINTERRUPTIBLE); | ||
344 | if (signal_pending(current)) { | ||
345 | rc = -EINTR; | ||
346 | break; | ||
347 | } | ||
348 | schedule(); | ||
349 | remove_wait_queue(&vga_wait_queue, &wait); | ||
350 | set_current_state(TASK_RUNNING); | ||
351 | } | ||
352 | return rc; | ||
353 | } | ||
354 | EXPORT_SYMBOL(vga_get); | ||
355 | |||
356 | int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) | ||
357 | { | ||
358 | struct vga_device *vgadev; | ||
359 | unsigned long flags; | ||
360 | int rc = 0; | ||
361 | |||
362 | vga_check_first_use(); | ||
363 | |||
364 | /* The one who calls us should check for this, but lets be sure... */ | ||
365 | if (pdev == NULL) | ||
366 | pdev = vga_default_device(); | ||
367 | if (pdev == NULL) | ||
368 | return 0; | ||
369 | spin_lock_irqsave(&vga_lock, flags); | ||
370 | vgadev = vgadev_find(pdev); | ||
371 | if (vgadev == NULL) { | ||
372 | rc = -ENODEV; | ||
373 | goto bail; | ||
374 | } | ||
375 | if (__vga_tryget(vgadev, rsrc)) | ||
376 | rc = -EBUSY; | ||
377 | bail: | ||
378 | spin_unlock_irqrestore(&vga_lock, flags); | ||
379 | return rc; | ||
380 | } | ||
381 | EXPORT_SYMBOL(vga_tryget); | ||
382 | |||
383 | void vga_put(struct pci_dev *pdev, unsigned int rsrc) | ||
384 | { | ||
385 | struct vga_device *vgadev; | ||
386 | unsigned long flags; | ||
387 | |||
388 | /* The one who calls us should check for this, but lets be sure... */ | ||
389 | if (pdev == NULL) | ||
390 | pdev = vga_default_device(); | ||
391 | if (pdev == NULL) | ||
392 | return; | ||
393 | spin_lock_irqsave(&vga_lock, flags); | ||
394 | vgadev = vgadev_find(pdev); | ||
395 | if (vgadev == NULL) | ||
396 | goto bail; | ||
397 | __vga_put(vgadev, rsrc); | ||
398 | bail: | ||
399 | spin_unlock_irqrestore(&vga_lock, flags); | ||
400 | } | ||
401 | EXPORT_SYMBOL(vga_put); | ||
402 | |||
403 | /* | ||
404 | * Currently, we assume that the "initial" setup of the system is | ||
405 | * not sane, that is we come up with conflicting devices and let | ||
406 | * the arbiter's clients decide whether devices decode legacy | ||
407 | * things or not. | ||
408 | */ | ||
409 | static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) | ||
410 | { | ||
411 | struct vga_device *vgadev; | ||
412 | unsigned long flags; | ||
413 | struct pci_bus *bus; | ||
414 | struct pci_dev *bridge; | ||
415 | u16 cmd; | ||
416 | |||
417 | /* Only deal with VGA class devices */ | ||
418 | if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) | ||
419 | return false; | ||
420 | |||
421 | /* Allocate structure */ | ||
422 | vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL); | ||
423 | if (vgadev == NULL) { | ||
424 | pr_err("vgaarb: failed to allocate pci device\n"); | ||
425 | /* What to do on allocation failure ? For now, let's | ||
426 | * just do nothing, I'm not sure there is anything saner | ||
427 | * to be done | ||
428 | */ | ||
429 | return false; | ||
430 | } | ||
431 | |||
432 | memset(vgadev, 0, sizeof(*vgadev)); | ||
433 | |||
434 | /* Take lock & check for duplicates */ | ||
435 | spin_lock_irqsave(&vga_lock, flags); | ||
436 | if (vgadev_find(pdev) != NULL) { | ||
437 | BUG_ON(1); | ||
438 | goto fail; | ||
439 | } | ||
440 | vgadev->pdev = pdev; | ||
441 | |||
442 | /* By default, assume we decode everything */ | ||
443 | vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | ||
444 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | ||
445 | |||
446 | /* by default mark it as decoding */ | ||
447 | vga_decode_count++; | ||
448 | /* Mark that we "own" resources based on our enables, we will | ||
449 | * clear that below if the bridge isn't forwarding | ||
450 | */ | ||
451 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
452 | if (cmd & PCI_COMMAND_IO) | ||
453 | vgadev->owns |= VGA_RSRC_LEGACY_IO; | ||
454 | if (cmd & PCI_COMMAND_MEMORY) | ||
455 | vgadev->owns |= VGA_RSRC_LEGACY_MEM; | ||
456 | |||
457 | /* Check if VGA cycles can get down to us */ | ||
458 | bus = pdev->bus; | ||
459 | while (bus) { | ||
460 | bridge = bus->self; | ||
461 | if (bridge) { | ||
462 | u16 l; | ||
463 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, | ||
464 | &l); | ||
465 | if (!(l & PCI_BRIDGE_CTL_VGA)) { | ||
466 | vgadev->owns = 0; | ||
467 | break; | ||
468 | } | ||
469 | } | ||
470 | bus = bus->parent; | ||
471 | } | ||
472 | |||
473 | /* Deal with VGA default device. Use first enabled one | ||
474 | * by default if arch doesn't have its own hook | ||
475 | */ | ||
476 | #ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE | ||
477 | if (vga_default == NULL && | ||
478 | ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) | ||
479 | vga_default = pci_dev_get(pdev); | ||
480 | #endif | ||
481 | |||
482 | /* Add to the list */ | ||
483 | list_add(&vgadev->list, &vga_list); | ||
484 | vga_count++; | ||
485 | pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n", | ||
486 | pci_name(pdev), | ||
487 | vga_iostate_to_str(vgadev->decodes), | ||
488 | vga_iostate_to_str(vgadev->owns), | ||
489 | vga_iostate_to_str(vgadev->locks)); | ||
490 | |||
491 | spin_unlock_irqrestore(&vga_lock, flags); | ||
492 | return true; | ||
493 | fail: | ||
494 | spin_unlock_irqrestore(&vga_lock, flags); | ||
495 | kfree(vgadev); | ||
496 | return false; | ||
497 | } | ||
498 | |||
499 | static bool vga_arbiter_del_pci_device(struct pci_dev *pdev) | ||
500 | { | ||
501 | struct vga_device *vgadev; | ||
502 | unsigned long flags; | ||
503 | bool ret = true; | ||
504 | |||
505 | spin_lock_irqsave(&vga_lock, flags); | ||
506 | vgadev = vgadev_find(pdev); | ||
507 | if (vgadev == NULL) { | ||
508 | ret = false; | ||
509 | goto bail; | ||
510 | } | ||
511 | |||
512 | if (vga_default == pdev) { | ||
513 | pci_dev_put(vga_default); | ||
514 | vga_default = NULL; | ||
515 | } | ||
516 | |||
517 | if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) | ||
518 | vga_decode_count--; | ||
519 | |||
520 | /* Remove entry from list */ | ||
521 | list_del(&vgadev->list); | ||
522 | vga_count--; | ||
523 | /* Notify userland driver that the device is gone so it discards | ||
524 | * it's copies of the pci_dev pointer | ||
525 | */ | ||
526 | vga_arb_device_card_gone(pdev); | ||
527 | |||
528 | /* Wake up all possible waiters */ | ||
529 | wake_up_all(&vga_wait_queue); | ||
530 | bail: | ||
531 | spin_unlock_irqrestore(&vga_lock, flags); | ||
532 | kfree(vgadev); | ||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | /* this is called with the lock */ | ||
537 | static inline void vga_update_device_decodes(struct vga_device *vgadev, | ||
538 | int new_decodes) | ||
539 | { | ||
540 | int old_decodes; | ||
541 | struct vga_device *new_vgadev, *conflict; | ||
542 | |||
543 | old_decodes = vgadev->decodes; | ||
544 | vgadev->decodes = new_decodes; | ||
545 | |||
546 | pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", | ||
547 | pci_name(vgadev->pdev), | ||
548 | vga_iostate_to_str(old_decodes), | ||
549 | vga_iostate_to_str(vgadev->decodes), | ||
550 | vga_iostate_to_str(vgadev->owns)); | ||
551 | |||
552 | |||
553 | /* if we own the decodes we should move them along to | ||
554 | another card */ | ||
555 | if ((vgadev->owns & old_decodes) && (vga_count > 1)) { | ||
556 | /* set us to own nothing */ | ||
557 | vgadev->owns &= ~old_decodes; | ||
558 | list_for_each_entry(new_vgadev, &vga_list, list) { | ||
559 | if ((new_vgadev != vgadev) && | ||
560 | (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) { | ||
561 | pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev)); | ||
562 | conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK); | ||
563 | if (!conflict) | ||
564 | __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK); | ||
565 | break; | ||
566 | } | ||
567 | } | ||
568 | } | ||
569 | |||
570 | /* change decodes counter */ | ||
571 | if (old_decodes != new_decodes) { | ||
572 | if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) | ||
573 | vga_decode_count++; | ||
574 | else | ||
575 | vga_decode_count--; | ||
576 | } | ||
577 | } | ||
578 | |||
579 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | ||
580 | { | ||
581 | struct vga_device *vgadev; | ||
582 | unsigned long flags; | ||
583 | |||
584 | decodes &= VGA_RSRC_LEGACY_MASK; | ||
585 | |||
586 | spin_lock_irqsave(&vga_lock, flags); | ||
587 | vgadev = vgadev_find(pdev); | ||
588 | if (vgadev == NULL) | ||
589 | goto bail; | ||
590 | |||
591 | /* don't let userspace futz with kernel driver decodes */ | ||
592 | if (userspace && vgadev->set_vga_decode) | ||
593 | goto bail; | ||
594 | |||
595 | /* update the device decodes + counter */ | ||
596 | vga_update_device_decodes(vgadev, decodes); | ||
597 | |||
598 | /* XXX if somebody is going from "doesn't decode" to "decodes" state | ||
599 | * here, additional care must be taken as we may have pending owner | ||
600 | * ship of non-legacy region ... | ||
601 | */ | ||
602 | bail: | ||
603 | spin_unlock_irqrestore(&vga_lock, flags); | ||
604 | } | ||
605 | |||
/**
 * vga_set_legacy_decoding - set the legacy VGA decode flags for a device
 * @pdev: PCI device of the VGA card
 * @decodes: bitmask of VGA_RSRC_* resources the card decodes
 *
 * Kernel-side entry point; delegates to __vga_set_legacy_decoding with
 * userspace=false so the "don't let userspace futz with kernel driver
 * decodes" guard does not apply.
 */
void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
{
	__vga_set_legacy_decoding(pdev, decodes, false);
}
EXPORT_SYMBOL(vga_set_legacy_decoding);
611 | |||
612 | /* call with NULL to unregister */ | ||
613 | int vga_client_register(struct pci_dev *pdev, void *cookie, | ||
614 | void (*irq_set_state)(void *cookie, bool state), | ||
615 | unsigned int (*set_vga_decode)(void *cookie, bool decode)) | ||
616 | { | ||
617 | int ret = -1; | ||
618 | struct vga_device *vgadev; | ||
619 | unsigned long flags; | ||
620 | |||
621 | spin_lock_irqsave(&vga_lock, flags); | ||
622 | vgadev = vgadev_find(pdev); | ||
623 | if (!vgadev) | ||
624 | goto bail; | ||
625 | |||
626 | vgadev->irq_set_state = irq_set_state; | ||
627 | vgadev->set_vga_decode = set_vga_decode; | ||
628 | vgadev->cookie = cookie; | ||
629 | ret = 0; | ||
630 | |||
631 | bail: | ||
632 | spin_unlock_irqrestore(&vga_lock, flags); | ||
633 | return ret; | ||
634 | |||
635 | } | ||
636 | EXPORT_SYMBOL(vga_client_register); | ||
637 | |||
/*
 * Char driver implementation
 *
 * Semantics are:
 *
 * open       : open user instance of the arbiter. by default, it's
 *                attached to the default VGA device of the system.
 *
 * close      : close user instance, release locks
 *
 * read       : return a string indicating the status of the target.
 *                an IO state string is of the form {io,mem,io+mem,none},
 *                mc and ic are respectively mem and io lock counts (for
 *                debugging/diagnostic only). "decodes" indicate what the
 *                card currently decodes, "owns" indicates what is currently
 *                enabled on it, and "locks" indicates what is locked by this
 *                card. If the card is unplugged, we get "invalid" then for
 *                card_ID and an -ENODEV error is returned for any command
 *                until a new card is targeted
 *
 *   "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
 *
 * write       : write a command to the arbiter. List of commands is:
 *
 *   target <card_ID>   : switch target to card <card_ID> (see below)
 *   lock <io_state>    : acquire locks on target ("none" is invalid io_state)
 *   trylock <io_state> : non-blocking acquire locks on target
 *   unlock <io_state>  : release locks on target
 *   unlock all         : release all locks on target held by this user
 *   decodes <io_state> : set the legacy decoding attributes for the card
 *
 * poll         : event if something changes on any card (not just the
 *                target)
 *
 * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
 * to go back to the system default card (TODO: not implemented yet).
 * Currently, only PCI is supported as a prefix, but the userland API may
 * support other bus types in the future, even if the current kernel
 * implementation doesn't.
 *
 * Note about locks:
 *
 * The driver keeps track of which user has which locks on which card. It
 * supports stacking, like the kernel one. This complicates the implementation
 * a bit, but makes the arbiter more tolerant to userspace problems and able
 * to properly clean up in all cases when a process dies.
 * Currently, a maximum of 16 cards can simultaneously have locks issued from
 * userspace for a given user (file descriptor instance) of the arbiter.
 *
 * If a device is hot-plugged or hot-unplugged, hooks inside the module notice
 * it being added to/removed from the system and it is automatically added
 * to/removed from the arbiter.
 */
690 | |||
/* Maximum number of cards a single user instance can hold locks on */
#define MAX_USER_CARDS         16
#define PCI_INVALID_CARD       ((struct pci_dev *)-1UL)

/*
 * Each user has an array of these, tracking which cards have locks
 */
struct vga_arb_user_card {
	struct pci_dev *pdev;		/* card this entry refers to */
	unsigned int mem_cnt;		/* nested legacy MEM locks held by this user */
	unsigned int io_cnt;		/* nested legacy IO locks held by this user */
};

/* Per-open-file state of the arbiter character device */
struct vga_arb_private {
	struct list_head list;		/* link in vga_user_list */
	struct pci_dev *target;		/* card that commands currently apply to */
	struct vga_arb_user_card cards[MAX_USER_CARDS];	/* lock bookkeeping */
	spinlock_t lock;		/* protects this structure */
};
709 | |||
/* All open user instances of the arbiter device, protected by vga_user_lock */
static LIST_HEAD(vga_user_list);
static DEFINE_SPINLOCK(vga_user_lock);
712 | |||
713 | |||
/*
 * Parse a string of the form "PCI:domain:bus:dev.fn" into its components.
 * Returns 1 and fills *domain, *bus and *devfn on success, 0 if the string
 * does not match the expected format.  ('count' is currently unused.)
 */
static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
			       unsigned int *bus, unsigned int *devfn)
{
	unsigned int slot, func;

	if (sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func) != 4)
		return 0;

	*devfn = PCI_DEVFN(slot, func);
	return 1;
}
734 | |||
/*
 * Read handler of the arbiter device: formats the state of the client's
 * current target card ("count:...,PCI:...,decodes=...,owns=...,locks=...")
 * into a kernel buffer under vga_lock, then copies it to userspace.
 * Returns the number of bytes copied, "invalid" if there is no valid
 * target, -ENOMEM or -EFAULT on failure.
 */
static ssize_t vga_arb_read(struct file *file, char __user * buf,
			    size_t count, loff_t *ppos)
{
	struct vga_arb_private *priv = file->private_data;
	struct vga_device *vgadev;
	struct pci_dev *pdev;
	unsigned long flags;
	size_t len;
	int rc;
	char *lbuf;

	lbuf = kmalloc(1024, GFP_KERNEL);
	if (lbuf == NULL)
		return -ENOMEM;

	/* Shields against vga_arb_device_card_gone (pci_dev going
	 * away), and allows access to vga list
	 */
	spin_lock_irqsave(&vga_lock, flags);

	/* If we are targeting the default, use it */
	pdev = priv->target;
	if (pdev == NULL || pdev == PCI_INVALID_CARD) {
		spin_unlock_irqrestore(&vga_lock, flags);
		len = sprintf(lbuf, "invalid");
		goto done;
	}

	/* Find card vgadev structure */
	vgadev = vgadev_find(pdev);
	if (vgadev == NULL) {
		/* Wow, it's not in the list, that shouldn't happen,
		 * let's fix us up and return invalid card
		 */
		if (pdev == priv->target)
			vga_arb_device_card_gone(pdev);
		spin_unlock_irqrestore(&vga_lock, flags);
		len = sprintf(lbuf, "invalid");
		goto done;
	}

	/* Fill the buffer with infos */
	len = snprintf(lbuf, 1024,
		       "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
		       vga_decode_count, pci_name(pdev),
		       vga_iostate_to_str(vgadev->decodes),
		       vga_iostate_to_str(vgadev->owns),
		       vga_iostate_to_str(vgadev->locks),
		       vgadev->io_lock_cnt, vgadev->mem_lock_cnt);

	spin_unlock_irqrestore(&vga_lock, flags);
done:

	/* Copy that to user; truncate to the caller's buffer size */
	if (len > count)
		len = count;
	rc = copy_to_user(buf, lbuf, len);
	kfree(lbuf);
	if (rc)
		return -EFAULT;
	return len;
}
797 | |||
798 | /* | ||
799 | * TODO: To avoid parsing inside kernel and to improve the speed we may | ||
800 | * consider use ioctl here | ||
801 | */ | ||
802 | static ssize_t vga_arb_write(struct file *file, const char __user * buf, | ||
803 | size_t count, loff_t *ppos) | ||
804 | { | ||
805 | struct vga_arb_private *priv = file->private_data; | ||
806 | struct vga_arb_user_card *uc = NULL; | ||
807 | struct pci_dev *pdev; | ||
808 | |||
809 | unsigned int io_state; | ||
810 | |||
811 | char *kbuf, *curr_pos; | ||
812 | size_t remaining = count; | ||
813 | |||
814 | int ret_val; | ||
815 | int i; | ||
816 | |||
817 | |||
818 | kbuf = kmalloc(count + 1, GFP_KERNEL); | ||
819 | if (!kbuf) | ||
820 | return -ENOMEM; | ||
821 | |||
822 | if (copy_from_user(kbuf, buf, count)) { | ||
823 | kfree(kbuf); | ||
824 | return -EFAULT; | ||
825 | } | ||
826 | curr_pos = kbuf; | ||
827 | kbuf[count] = '\0'; /* Just to make sure... */ | ||
828 | |||
829 | if (strncmp(curr_pos, "lock ", 5) == 0) { | ||
830 | curr_pos += 5; | ||
831 | remaining -= 5; | ||
832 | |||
833 | pr_devel("client 0x%p called 'lock'\n", priv); | ||
834 | |||
835 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | ||
836 | ret_val = -EPROTO; | ||
837 | goto done; | ||
838 | } | ||
839 | if (io_state == VGA_RSRC_NONE) { | ||
840 | ret_val = -EPROTO; | ||
841 | goto done; | ||
842 | } | ||
843 | |||
844 | pdev = priv->target; | ||
845 | if (priv->target == NULL) { | ||
846 | ret_val = -ENODEV; | ||
847 | goto done; | ||
848 | } | ||
849 | |||
850 | vga_get_uninterruptible(pdev, io_state); | ||
851 | |||
852 | /* Update the client's locks lists... */ | ||
853 | for (i = 0; i < MAX_USER_CARDS; i++) { | ||
854 | if (priv->cards[i].pdev == pdev) { | ||
855 | if (io_state & VGA_RSRC_LEGACY_IO) | ||
856 | priv->cards[i].io_cnt++; | ||
857 | if (io_state & VGA_RSRC_LEGACY_MEM) | ||
858 | priv->cards[i].mem_cnt++; | ||
859 | break; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | ret_val = count; | ||
864 | goto done; | ||
865 | } else if (strncmp(curr_pos, "unlock ", 7) == 0) { | ||
866 | curr_pos += 7; | ||
867 | remaining -= 7; | ||
868 | |||
869 | pr_devel("client 0x%p called 'unlock'\n", priv); | ||
870 | |||
871 | if (strncmp(curr_pos, "all", 3) == 0) | ||
872 | io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; | ||
873 | else { | ||
874 | if (!vga_str_to_iostate | ||
875 | (curr_pos, remaining, &io_state)) { | ||
876 | ret_val = -EPROTO; | ||
877 | goto done; | ||
878 | } | ||
879 | /* TODO: Add this? | ||
880 | if (io_state == VGA_RSRC_NONE) { | ||
881 | ret_val = -EPROTO; | ||
882 | goto done; | ||
883 | } | ||
884 | */ | ||
885 | } | ||
886 | |||
887 | pdev = priv->target; | ||
888 | if (priv->target == NULL) { | ||
889 | ret_val = -ENODEV; | ||
890 | goto done; | ||
891 | } | ||
892 | for (i = 0; i < MAX_USER_CARDS; i++) { | ||
893 | if (priv->cards[i].pdev == pdev) | ||
894 | uc = &priv->cards[i]; | ||
895 | } | ||
896 | |||
897 | if (!uc) | ||
898 | return -EINVAL; | ||
899 | |||
900 | if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) | ||
901 | return -EINVAL; | ||
902 | |||
903 | if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) | ||
904 | return -EINVAL; | ||
905 | |||
906 | vga_put(pdev, io_state); | ||
907 | |||
908 | if (io_state & VGA_RSRC_LEGACY_IO) | ||
909 | uc->io_cnt--; | ||
910 | if (io_state & VGA_RSRC_LEGACY_MEM) | ||
911 | uc->mem_cnt--; | ||
912 | |||
913 | ret_val = count; | ||
914 | goto done; | ||
915 | } else if (strncmp(curr_pos, "trylock ", 8) == 0) { | ||
916 | curr_pos += 8; | ||
917 | remaining -= 8; | ||
918 | |||
919 | pr_devel("client 0x%p called 'trylock'\n", priv); | ||
920 | |||
921 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | ||
922 | ret_val = -EPROTO; | ||
923 | goto done; | ||
924 | } | ||
925 | /* TODO: Add this? | ||
926 | if (io_state == VGA_RSRC_NONE) { | ||
927 | ret_val = -EPROTO; | ||
928 | goto done; | ||
929 | } | ||
930 | */ | ||
931 | |||
932 | pdev = priv->target; | ||
933 | if (priv->target == NULL) { | ||
934 | ret_val = -ENODEV; | ||
935 | goto done; | ||
936 | } | ||
937 | |||
938 | if (vga_tryget(pdev, io_state)) { | ||
939 | /* Update the client's locks lists... */ | ||
940 | for (i = 0; i < MAX_USER_CARDS; i++) { | ||
941 | if (priv->cards[i].pdev == pdev) { | ||
942 | if (io_state & VGA_RSRC_LEGACY_IO) | ||
943 | priv->cards[i].io_cnt++; | ||
944 | if (io_state & VGA_RSRC_LEGACY_MEM) | ||
945 | priv->cards[i].mem_cnt++; | ||
946 | break; | ||
947 | } | ||
948 | } | ||
949 | ret_val = count; | ||
950 | goto done; | ||
951 | } else { | ||
952 | ret_val = -EBUSY; | ||
953 | goto done; | ||
954 | } | ||
955 | |||
956 | } else if (strncmp(curr_pos, "target ", 7) == 0) { | ||
957 | unsigned int domain, bus, devfn; | ||
958 | struct vga_device *vgadev; | ||
959 | |||
960 | curr_pos += 7; | ||
961 | remaining -= 7; | ||
962 | pr_devel("client 0x%p called 'target'\n", priv); | ||
963 | /* if target is default */ | ||
964 | if (!strncmp(buf, "default", 7)) | ||
965 | pdev = pci_dev_get(vga_default_device()); | ||
966 | else { | ||
967 | if (!vga_pci_str_to_vars(curr_pos, remaining, | ||
968 | &domain, &bus, &devfn)) { | ||
969 | ret_val = -EPROTO; | ||
970 | goto done; | ||
971 | } | ||
972 | |||
973 | pdev = pci_get_bus_and_slot(bus, devfn); | ||
974 | if (!pdev) { | ||
975 | pr_info("vgaarb: invalid PCI address!\n"); | ||
976 | ret_val = -ENODEV; | ||
977 | goto done; | ||
978 | } | ||
979 | } | ||
980 | |||
981 | vgadev = vgadev_find(pdev); | ||
982 | if (vgadev == NULL) { | ||
983 | pr_info("vgaarb: this pci device is not a vga device\n"); | ||
984 | pci_dev_put(pdev); | ||
985 | ret_val = -ENODEV; | ||
986 | goto done; | ||
987 | } | ||
988 | |||
989 | priv->target = pdev; | ||
990 | for (i = 0; i < MAX_USER_CARDS; i++) { | ||
991 | if (priv->cards[i].pdev == pdev) | ||
992 | break; | ||
993 | if (priv->cards[i].pdev == NULL) { | ||
994 | priv->cards[i].pdev = pdev; | ||
995 | priv->cards[i].io_cnt = 0; | ||
996 | priv->cards[i].mem_cnt = 0; | ||
997 | break; | ||
998 | } | ||
999 | } | ||
1000 | if (i == MAX_USER_CARDS) { | ||
1001 | pr_err("vgaarb: maximum user cards number reached!\n"); | ||
1002 | pci_dev_put(pdev); | ||
1003 | /* XXX: which value to return? */ | ||
1004 | ret_val = -ENOMEM; | ||
1005 | goto done; | ||
1006 | } | ||
1007 | |||
1008 | ret_val = count; | ||
1009 | pci_dev_put(pdev); | ||
1010 | goto done; | ||
1011 | |||
1012 | |||
1013 | } else if (strncmp(curr_pos, "decodes ", 8) == 0) { | ||
1014 | curr_pos += 8; | ||
1015 | remaining -= 8; | ||
1016 | pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); | ||
1017 | |||
1018 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | ||
1019 | ret_val = -EPROTO; | ||
1020 | goto done; | ||
1021 | } | ||
1022 | pdev = priv->target; | ||
1023 | if (priv->target == NULL) { | ||
1024 | ret_val = -ENODEV; | ||
1025 | goto done; | ||
1026 | } | ||
1027 | |||
1028 | __vga_set_legacy_decoding(pdev, io_state, true); | ||
1029 | ret_val = count; | ||
1030 | goto done; | ||
1031 | } | ||
1032 | /* If we got here, the message written is not part of the protocol! */ | ||
1033 | kfree(kbuf); | ||
1034 | return -EPROTO; | ||
1035 | |||
1036 | done: | ||
1037 | kfree(kbuf); | ||
1038 | return ret_val; | ||
1039 | } | ||
1040 | |||
1041 | static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) | ||
1042 | { | ||
1043 | struct vga_arb_private *priv = file->private_data; | ||
1044 | |||
1045 | pr_devel("%s\n", __func__); | ||
1046 | |||
1047 | if (priv == NULL) | ||
1048 | return -ENODEV; | ||
1049 | poll_wait(file, &vga_wait_queue, wait); | ||
1050 | return POLLIN; | ||
1051 | } | ||
1052 | |||
1053 | static int vga_arb_open(struct inode *inode, struct file *file) | ||
1054 | { | ||
1055 | struct vga_arb_private *priv; | ||
1056 | unsigned long flags; | ||
1057 | |||
1058 | pr_devel("%s\n", __func__); | ||
1059 | |||
1060 | priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); | ||
1061 | if (priv == NULL) | ||
1062 | return -ENOMEM; | ||
1063 | memset(priv, 0, sizeof(*priv)); | ||
1064 | spin_lock_init(&priv->lock); | ||
1065 | file->private_data = priv; | ||
1066 | |||
1067 | spin_lock_irqsave(&vga_user_lock, flags); | ||
1068 | list_add(&priv->list, &vga_user_list); | ||
1069 | spin_unlock_irqrestore(&vga_user_lock, flags); | ||
1070 | |||
1071 | /* Set the client' lists of locks */ | ||
1072 | priv->target = vga_default_device(); /* Maybe this is still null! */ | ||
1073 | priv->cards[0].pdev = priv->target; | ||
1074 | priv->cards[0].io_cnt = 0; | ||
1075 | priv->cards[0].mem_cnt = 0; | ||
1076 | |||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
/*
 * Release handler: drop every lock this client still holds (so a dying
 * process cannot leave the arbiter wedged), unlink it from the user list
 * and free its state.
 */
static int vga_arb_release(struct inode *inode, struct file *file)
{
	struct vga_arb_private *priv = file->private_data;
	struct vga_arb_user_card *uc;
	unsigned long flags;
	int i;

	pr_devel("%s\n", __func__);

	if (priv == NULL)
		return -ENODEV;

	spin_lock_irqsave(&vga_user_lock, flags);
	list_del(&priv->list);
	for (i = 0; i < MAX_USER_CARDS; i++) {
		uc = &priv->cards[i];
		if (uc->pdev == NULL)
			continue;
		pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
			 uc->io_cnt, uc->mem_cnt);
		/* NOTE(review): vga_put is called here with vga_user_lock
		 * held — presumably it nests vga_lock inside; confirm the
		 * lock ordering is consistent with the rest of the file. */
		while (uc->io_cnt--)
			vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
		while (uc->mem_cnt--)
			vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
	}
	spin_unlock_irqrestore(&vga_user_lock, flags);

	kfree(priv);

	return 0;
}
1112 | |||
/* Tell userspace clients that a card went away so they can drop their
 * pci_dev pointers — currently a no-op stub (see callers in the add/del
 * paths above). */
static void vga_arb_device_card_gone(struct pci_dev *pdev)
{
}
1116 | |||
1117 | /* | ||
1118 | * callback any registered clients to let them know we have a | ||
1119 | * change in VGA cards | ||
1120 | */ | ||
1121 | static void vga_arbiter_notify_clients(void) | ||
1122 | { | ||
1123 | struct vga_device *vgadev; | ||
1124 | unsigned long flags; | ||
1125 | uint32_t new_decodes; | ||
1126 | bool new_state; | ||
1127 | |||
1128 | if (!vga_arbiter_used) | ||
1129 | return; | ||
1130 | |||
1131 | spin_lock_irqsave(&vga_lock, flags); | ||
1132 | list_for_each_entry(vgadev, &vga_list, list) { | ||
1133 | if (vga_count > 1) | ||
1134 | new_state = false; | ||
1135 | else | ||
1136 | new_state = true; | ||
1137 | if (vgadev->set_vga_decode) { | ||
1138 | new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state); | ||
1139 | vga_update_device_decodes(vgadev, new_decodes); | ||
1140 | } | ||
1141 | } | ||
1142 | spin_unlock_irqrestore(&vga_lock, flags); | ||
1143 | } | ||
1144 | |||
1145 | static int pci_notify(struct notifier_block *nb, unsigned long action, | ||
1146 | void *data) | ||
1147 | { | ||
1148 | struct device *dev = data; | ||
1149 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1150 | bool notify = false; | ||
1151 | |||
1152 | pr_devel("%s\n", __func__); | ||
1153 | |||
1154 | /* For now we're only intereted in devices added and removed. I didn't | ||
1155 | * test this thing here, so someone needs to double check for the | ||
1156 | * cases of hotplugable vga cards. */ | ||
1157 | if (action == BUS_NOTIFY_ADD_DEVICE) | ||
1158 | notify = vga_arbiter_add_pci_device(pdev); | ||
1159 | else if (action == BUS_NOTIFY_DEL_DEVICE) | ||
1160 | notify = vga_arbiter_del_pci_device(pdev); | ||
1161 | |||
1162 | if (notify) | ||
1163 | vga_arbiter_notify_clients(); | ||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
/* Bus notifier: keeps the arbiter's device list in sync with PCI hotplug */
static struct notifier_block pci_notifier = {
	.notifier_call = pci_notify,
};

/* File operations of the userspace arbiter interface */
static const struct file_operations vga_arb_device_fops = {
	.read = vga_arb_read,
	.write = vga_arb_write,
	.poll = vga_arb_fpoll,
	.open = vga_arb_open,
	.release = vga_arb_release,
};

/* Misc char device "vga_arbiter"; minor number is allocated dynamically */
static struct miscdevice vga_arb_device = {
	MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
};
1182 | |||
/*
 * Module init: register the misc device and the PCI bus notifier, then
 * seed the arbiter with every PCI device already present (non-VGA devices
 * are filtered out by vga_arbiter_add_pci_device).
 */
static int __init vga_arb_device_init(void)
{
	int rc;
	struct pci_dev *pdev;

	rc = misc_register(&vga_arb_device);
	/* NOTE(review): on failure this only logs and carries on — the
	 * notifier registration and device scan below still run, and the
	 * negative rc is returned at the end. Confirm this is intended. */
	if (rc < 0)
		pr_err("vgaarb: error %d registering device\n", rc);

	bus_register_notifier(&pci_bus_type, &pci_notifier);

	/* We add all pci devices satisfying vga class in the arbiter by
	 * default */
	pdev = NULL;
	while ((pdev =
		pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
			       PCI_ANY_ID, pdev)) != NULL)
		vga_arbiter_add_pci_device(pdev);

	pr_info("vgaarb: loaded\n");
	return rc;
}
subsys_initcall(vga_arb_device_init);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 1ebd6b4c743b..4a7f11d8f432 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -8,6 +8,9 @@ obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ | |||
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
11 | obj-$(CONFIG_PCI_LEGACY) += legacy.o | ||
12 | CFLAGS_legacy.o += -Wno-deprecated-declarations | ||
13 | |||
11 | # Build PCI Express stuff if needed | 14 | # Build PCI Express stuff if needed |
12 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
13 | 16 | ||
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 2aa117c8cd87..3625b094bf7e 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile | |||
@@ -22,7 +22,7 @@ obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o | |||
22 | # Link this last so it doesn't claim devices that have a real hotplug driver | 22 | # Link this last so it doesn't claim devices that have a real hotplug driver |
23 | obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o | 23 | obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o |
24 | 24 | ||
25 | pci_hotplug-objs := pci_hotplug_core.o | 25 | pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o |
26 | 26 | ||
27 | ifdef CONFIG_HOTPLUG_PCI_CPCI | 27 | ifdef CONFIG_HOTPLUG_PCI_CPCI |
28 | pci_hotplug-objs += cpci_hotplug_core.o \ | 28 | pci_hotplug-objs += cpci_hotplug_core.o \ |
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index eb159587d0bf..a73028ec52e5 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) | 41 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) |
42 | 42 | ||
43 | #define METHOD_NAME__SUN "_SUN" | 43 | #define METHOD_NAME__SUN "_SUN" |
44 | #define METHOD_NAME__HPP "_HPP" | ||
45 | #define METHOD_NAME_OSHP "OSHP" | 44 | #define METHOD_NAME_OSHP "OSHP" |
46 | 45 | ||
47 | static int debug_acpi; | 46 | static int debug_acpi; |
@@ -215,80 +214,41 @@ acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) | |||
215 | static acpi_status | 214 | static acpi_status |
216 | acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) | 215 | acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) |
217 | { | 216 | { |
218 | acpi_status status; | 217 | acpi_status status; |
219 | u8 nui[4]; | 218 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
220 | struct acpi_buffer ret_buf = { 0, NULL}; | 219 | union acpi_object *package, *fields; |
221 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 220 | int i; |
222 | union acpi_object *ext_obj, *package; | ||
223 | int i, len = 0; | ||
224 | |||
225 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | ||
226 | 221 | ||
227 | /* Clear the return buffer with zeros */ | ||
228 | memset(hpp, 0, sizeof(struct hotplug_params)); | 222 | memset(hpp, 0, sizeof(struct hotplug_params)); |
229 | 223 | ||
230 | /* get _hpp */ | 224 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); |
231 | status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf); | 225 | if (ACPI_FAILURE(status)) |
232 | switch (status) { | 226 | return status; |
233 | case AE_BUFFER_OVERFLOW: | ||
234 | ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL); | ||
235 | if (!ret_buf.pointer) { | ||
236 | printk(KERN_ERR "%s:%s alloc for _HPP fail\n", | ||
237 | __func__, (char *)string.pointer); | ||
238 | kfree(string.pointer); | ||
239 | return AE_NO_MEMORY; | ||
240 | } | ||
241 | status = acpi_evaluate_object(handle, METHOD_NAME__HPP, | ||
242 | NULL, &ret_buf); | ||
243 | if (ACPI_SUCCESS(status)) | ||
244 | break; | ||
245 | default: | ||
246 | if (ACPI_FAILURE(status)) { | ||
247 | pr_debug("%s:%s _HPP fail=0x%x\n", __func__, | ||
248 | (char *)string.pointer, status); | ||
249 | kfree(string.pointer); | ||
250 | return status; | ||
251 | } | ||
252 | } | ||
253 | 227 | ||
254 | ext_obj = (union acpi_object *) ret_buf.pointer; | 228 | package = (union acpi_object *) buffer.pointer; |
255 | if (ext_obj->type != ACPI_TYPE_PACKAGE) { | 229 | if (package->type != ACPI_TYPE_PACKAGE || |
256 | printk(KERN_ERR "%s:%s _HPP obj not a package\n", __func__, | 230 | package->package.count != 4) { |
257 | (char *)string.pointer); | ||
258 | status = AE_ERROR; | 231 | status = AE_ERROR; |
259 | goto free_and_return; | 232 | goto exit; |
260 | } | 233 | } |
261 | 234 | ||
262 | len = ext_obj->package.count; | 235 | fields = package->package.elements; |
263 | package = (union acpi_object *) ret_buf.pointer; | 236 | for (i = 0; i < 4; i++) { |
264 | for ( i = 0; (i < len) || (i < 4); i++) { | 237 | if (fields[i].type != ACPI_TYPE_INTEGER) { |
265 | ext_obj = (union acpi_object *) &package->package.elements[i]; | ||
266 | switch (ext_obj->type) { | ||
267 | case ACPI_TYPE_INTEGER: | ||
268 | nui[i] = (u8)ext_obj->integer.value; | ||
269 | break; | ||
270 | default: | ||
271 | printk(KERN_ERR "%s:%s _HPP obj type incorrect\n", | ||
272 | __func__, (char *)string.pointer); | ||
273 | status = AE_ERROR; | 238 | status = AE_ERROR; |
274 | goto free_and_return; | 239 | goto exit; |
275 | } | 240 | } |
276 | } | 241 | } |
277 | 242 | ||
278 | hpp->t0 = &hpp->type0_data; | 243 | hpp->t0 = &hpp->type0_data; |
279 | hpp->t0->cache_line_size = nui[0]; | 244 | hpp->t0->revision = 1; |
280 | hpp->t0->latency_timer = nui[1]; | 245 | hpp->t0->cache_line_size = fields[0].integer.value; |
281 | hpp->t0->enable_serr = nui[2]; | 246 | hpp->t0->latency_timer = fields[1].integer.value; |
282 | hpp->t0->enable_perr = nui[3]; | 247 | hpp->t0->enable_serr = fields[2].integer.value; |
283 | 248 | hpp->t0->enable_perr = fields[3].integer.value; | |
284 | pr_debug(" _HPP: cache_line_size=0x%x\n", hpp->t0->cache_line_size); | ||
285 | pr_debug(" _HPP: latency timer =0x%x\n", hpp->t0->latency_timer); | ||
286 | pr_debug(" _HPP: enable SERR =0x%x\n", hpp->t0->enable_serr); | ||
287 | pr_debug(" _HPP: enable PERR =0x%x\n", hpp->t0->enable_perr); | ||
288 | 249 | ||
289 | free_and_return: | 250 | exit: |
290 | kfree(string.pointer); | 251 | kfree(buffer.pointer); |
291 | kfree(ret_buf.pointer); | ||
292 | return status; | 252 | return status; |
293 | } | 253 | } |
294 | 254 | ||
@@ -322,20 +282,19 @@ static acpi_status acpi_run_oshp(acpi_handle handle) | |||
322 | return status; | 282 | return status; |
323 | } | 283 | } |
324 | 284 | ||
325 | /* acpi_get_hp_params_from_firmware | 285 | /* pci_get_hp_params |
326 | * | 286 | * |
327 | * @bus - the pci_bus of the bus on which the device is newly added | 287 | * @dev - the pci_dev for which we want parameters |
328 | * @hpp - allocated by the caller | 288 | * @hpp - allocated by the caller |
329 | */ | 289 | */ |
330 | acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | 290 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) |
331 | struct hotplug_params *hpp) | ||
332 | { | 291 | { |
333 | acpi_status status = AE_NOT_FOUND; | 292 | acpi_status status; |
334 | acpi_handle handle, phandle; | 293 | acpi_handle handle, phandle; |
335 | struct pci_bus *pbus; | 294 | struct pci_bus *pbus; |
336 | 295 | ||
337 | handle = NULL; | 296 | handle = NULL; |
338 | for (pbus = bus; pbus; pbus = pbus->parent) { | 297 | for (pbus = dev->bus; pbus; pbus = pbus->parent) { |
339 | handle = acpi_pci_get_bridge_handle(pbus); | 298 | handle = acpi_pci_get_bridge_handle(pbus); |
340 | if (handle) | 299 | if (handle) |
341 | break; | 300 | break; |
@@ -345,15 +304,15 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | |||
345 | * _HPP settings apply to all child buses, until another _HPP is | 304 | * _HPP settings apply to all child buses, until another _HPP is |
346 | * encountered. If we don't find an _HPP for the input pci dev, | 305 | * encountered. If we don't find an _HPP for the input pci dev, |
347 | * look for it in the parent device scope since that would apply to | 306 | * look for it in the parent device scope since that would apply to |
348 | * this pci dev. If we don't find any _HPP, use hardcoded defaults | 307 | * this pci dev. |
349 | */ | 308 | */ |
350 | while (handle) { | 309 | while (handle) { |
351 | status = acpi_run_hpx(handle, hpp); | 310 | status = acpi_run_hpx(handle, hpp); |
352 | if (ACPI_SUCCESS(status)) | 311 | if (ACPI_SUCCESS(status)) |
353 | break; | 312 | return 0; |
354 | status = acpi_run_hpp(handle, hpp); | 313 | status = acpi_run_hpp(handle, hpp); |
355 | if (ACPI_SUCCESS(status)) | 314 | if (ACPI_SUCCESS(status)) |
356 | break; | 315 | return 0; |
357 | if (acpi_is_root_bridge(handle)) | 316 | if (acpi_is_root_bridge(handle)) |
358 | break; | 317 | break; |
359 | status = acpi_get_parent(handle, &phandle); | 318 | status = acpi_get_parent(handle, &phandle); |
@@ -361,9 +320,9 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | |||
361 | break; | 320 | break; |
362 | handle = phandle; | 321 | handle = phandle; |
363 | } | 322 | } |
364 | return status; | 323 | return -ENODEV; |
365 | } | 324 | } |
366 | EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); | 325 | EXPORT_SYMBOL_GPL(pci_get_hp_params); |
367 | 326 | ||
368 | /** | 327 | /** |
369 | * acpi_get_hp_hw_control_from_firmware | 328 | * acpi_get_hp_hw_control_from_firmware |
@@ -500,18 +459,18 @@ check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
500 | 459 | ||
501 | /** | 460 | /** |
502 | * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots | 461 | * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots |
503 | * @pbus - PCI bus to scan | 462 | * @handle - handle of the PCI bus to scan |
504 | * | 463 | * |
505 | * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. | 464 | * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. |
506 | */ | 465 | */ |
507 | int acpi_pci_detect_ejectable(struct pci_bus *pbus) | 466 | int acpi_pci_detect_ejectable(acpi_handle handle) |
508 | { | 467 | { |
509 | acpi_handle handle; | ||
510 | int found = 0; | 468 | int found = 0; |
511 | 469 | ||
512 | if (!(handle = acpi_pci_get_bridge_handle(pbus))) | 470 | if (!handle) |
513 | return 0; | 471 | return found; |
514 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 472 | |
473 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
515 | check_hotplug, (void *)&found, NULL); | 474 | check_hotplug, (void *)&found, NULL); |
516 | return found; | 475 | return found; |
517 | } | 476 | } |
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index e68d5f20ffb3..7d938df79206 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h | |||
@@ -91,9 +91,6 @@ struct acpiphp_bridge { | |||
91 | /* PCI-to-PCI bridge device */ | 91 | /* PCI-to-PCI bridge device */ |
92 | struct pci_dev *pci_dev; | 92 | struct pci_dev *pci_dev; |
93 | 93 | ||
94 | /* ACPI 2.0 _HPP parameters */ | ||
95 | struct hotplug_params hpp; | ||
96 | |||
97 | spinlock_t res_lock; | 94 | spinlock_t res_lock; |
98 | }; | 95 | }; |
99 | 96 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 0cb0f830a993..58d25a163a8b 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(ioapic_list_lock); | |||
59 | 59 | ||
60 | static void handle_hotplug_event_bridge (acpi_handle, u32, void *); | 60 | static void handle_hotplug_event_bridge (acpi_handle, u32, void *); |
61 | static void acpiphp_sanitize_bus(struct pci_bus *bus); | 61 | static void acpiphp_sanitize_bus(struct pci_bus *bus); |
62 | static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus); | 62 | static void acpiphp_set_hpp_values(struct pci_bus *bus); |
63 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); | 63 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); |
64 | 64 | ||
65 | /* callback routine to check for the existence of a pci dock device */ | 65 | /* callback routine to check for the existence of a pci dock device */ |
@@ -261,51 +261,21 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
261 | 261 | ||
262 | 262 | ||
263 | /* see if it's worth looking at this bridge */ | 263 | /* see if it's worth looking at this bridge */ |
264 | static int detect_ejectable_slots(struct pci_bus *pbus) | 264 | static int detect_ejectable_slots(acpi_handle handle) |
265 | { | 265 | { |
266 | int found = acpi_pci_detect_ejectable(pbus); | 266 | int found = acpi_pci_detect_ejectable(handle); |
267 | if (!found) { | 267 | if (!found) { |
268 | acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus); | 268 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
269 | if (!bridge_handle) | ||
270 | return 0; | ||
271 | acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, | ||
272 | is_pci_dock_device, (void *)&found, NULL); | 269 | is_pci_dock_device, (void *)&found, NULL); |
273 | } | 270 | } |
274 | return found; | 271 | return found; |
275 | } | 272 | } |
276 | 273 | ||
277 | |||
278 | /* decode ACPI 2.0 _HPP hot plug parameters */ | ||
279 | static void decode_hpp(struct acpiphp_bridge *bridge) | ||
280 | { | ||
281 | acpi_status status; | ||
282 | |||
283 | status = acpi_get_hp_params_from_firmware(bridge->pci_bus, &bridge->hpp); | ||
284 | if (ACPI_FAILURE(status) || | ||
285 | !bridge->hpp.t0 || (bridge->hpp.t0->revision > 1)) { | ||
286 | /* use default numbers */ | ||
287 | printk(KERN_WARNING | ||
288 | "%s: Could not get hotplug parameters. Use defaults\n", | ||
289 | __func__); | ||
290 | bridge->hpp.t0 = &bridge->hpp.type0_data; | ||
291 | bridge->hpp.t0->revision = 0; | ||
292 | bridge->hpp.t0->cache_line_size = 0x10; | ||
293 | bridge->hpp.t0->latency_timer = 0x40; | ||
294 | bridge->hpp.t0->enable_serr = 0; | ||
295 | bridge->hpp.t0->enable_perr = 0; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | |||
300 | |||
301 | /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ | 274 | /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ |
302 | static void init_bridge_misc(struct acpiphp_bridge *bridge) | 275 | static void init_bridge_misc(struct acpiphp_bridge *bridge) |
303 | { | 276 | { |
304 | acpi_status status; | 277 | acpi_status status; |
305 | 278 | ||
306 | /* decode ACPI 2.0 _HPP (hot plug parameters) */ | ||
307 | decode_hpp(bridge); | ||
308 | |||
309 | /* must be added to the list prior to calling register_slot */ | 279 | /* must be added to the list prior to calling register_slot */ |
310 | list_add(&bridge->list, &bridge_list); | 280 | list_add(&bridge->list, &bridge_list); |
311 | 281 | ||
@@ -399,9 +369,10 @@ static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge) | |||
399 | 369 | ||
400 | 370 | ||
401 | /* allocate and initialize host bridge data structure */ | 371 | /* allocate and initialize host bridge data structure */ |
402 | static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) | 372 | static void add_host_bridge(acpi_handle *handle) |
403 | { | 373 | { |
404 | struct acpiphp_bridge *bridge; | 374 | struct acpiphp_bridge *bridge; |
375 | struct acpi_pci_root *root = acpi_pci_find_root(handle); | ||
405 | 376 | ||
406 | bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); | 377 | bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); |
407 | if (bridge == NULL) | 378 | if (bridge == NULL) |
@@ -410,7 +381,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) | |||
410 | bridge->type = BRIDGE_TYPE_HOST; | 381 | bridge->type = BRIDGE_TYPE_HOST; |
411 | bridge->handle = handle; | 382 | bridge->handle = handle; |
412 | 383 | ||
413 | bridge->pci_bus = pci_bus; | 384 | bridge->pci_bus = root->bus; |
414 | 385 | ||
415 | spin_lock_init(&bridge->res_lock); | 386 | spin_lock_init(&bridge->res_lock); |
416 | 387 | ||
@@ -419,7 +390,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) | |||
419 | 390 | ||
420 | 391 | ||
421 | /* allocate and initialize PCI-to-PCI bridge data structure */ | 392 | /* allocate and initialize PCI-to-PCI bridge data structure */ |
422 | static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) | 393 | static void add_p2p_bridge(acpi_handle *handle) |
423 | { | 394 | { |
424 | struct acpiphp_bridge *bridge; | 395 | struct acpiphp_bridge *bridge; |
425 | 396 | ||
@@ -433,8 +404,8 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) | |||
433 | bridge->handle = handle; | 404 | bridge->handle = handle; |
434 | config_p2p_bridge_flags(bridge); | 405 | config_p2p_bridge_flags(bridge); |
435 | 406 | ||
436 | bridge->pci_dev = pci_dev_get(pci_dev); | 407 | bridge->pci_dev = acpi_get_pci_dev(handle); |
437 | bridge->pci_bus = pci_dev->subordinate; | 408 | bridge->pci_bus = bridge->pci_dev->subordinate; |
438 | if (!bridge->pci_bus) { | 409 | if (!bridge->pci_bus) { |
439 | err("This is not a PCI-to-PCI bridge!\n"); | 410 | err("This is not a PCI-to-PCI bridge!\n"); |
440 | goto err; | 411 | goto err; |
@@ -451,7 +422,7 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) | |||
451 | init_bridge_misc(bridge); | 422 | init_bridge_misc(bridge); |
452 | return; | 423 | return; |
453 | err: | 424 | err: |
454 | pci_dev_put(pci_dev); | 425 | pci_dev_put(bridge->pci_dev); |
455 | kfree(bridge); | 426 | kfree(bridge); |
456 | return; | 427 | return; |
457 | } | 428 | } |
@@ -462,39 +433,21 @@ static acpi_status | |||
462 | find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) | 433 | find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) |
463 | { | 434 | { |
464 | acpi_status status; | 435 | acpi_status status; |
465 | acpi_handle dummy_handle; | ||
466 | unsigned long long tmp; | ||
467 | int device, function; | ||
468 | struct pci_dev *dev; | 436 | struct pci_dev *dev; |
469 | struct pci_bus *pci_bus = context; | ||
470 | |||
471 | status = acpi_get_handle(handle, "_ADR", &dummy_handle); | ||
472 | if (ACPI_FAILURE(status)) | ||
473 | return AE_OK; /* continue */ | ||
474 | |||
475 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &tmp); | ||
476 | if (ACPI_FAILURE(status)) { | ||
477 | dbg("%s: _ADR evaluation failure\n", __func__); | ||
478 | return AE_OK; | ||
479 | } | ||
480 | |||
481 | device = (tmp >> 16) & 0xffff; | ||
482 | function = tmp & 0xffff; | ||
483 | |||
484 | dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function)); | ||
485 | 437 | ||
438 | dev = acpi_get_pci_dev(handle); | ||
486 | if (!dev || !dev->subordinate) | 439 | if (!dev || !dev->subordinate) |
487 | goto out; | 440 | goto out; |
488 | 441 | ||
489 | /* check if this bridge has ejectable slots */ | 442 | /* check if this bridge has ejectable slots */ |
490 | if ((detect_ejectable_slots(dev->subordinate) > 0)) { | 443 | if ((detect_ejectable_slots(handle) > 0)) { |
491 | dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); | 444 | dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); |
492 | add_p2p_bridge(handle, dev); | 445 | add_p2p_bridge(handle); |
493 | } | 446 | } |
494 | 447 | ||
495 | /* search P2P bridges under this p2p bridge */ | 448 | /* search P2P bridges under this p2p bridge */ |
496 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 449 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
497 | find_p2p_bridge, dev->subordinate, NULL); | 450 | find_p2p_bridge, NULL, NULL); |
498 | if (ACPI_FAILURE(status)) | 451 | if (ACPI_FAILURE(status)) |
499 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); | 452 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); |
500 | 453 | ||
@@ -509,9 +462,7 @@ static int add_bridge(acpi_handle handle) | |||
509 | { | 462 | { |
510 | acpi_status status; | 463 | acpi_status status; |
511 | unsigned long long tmp; | 464 | unsigned long long tmp; |
512 | int seg, bus; | ||
513 | acpi_handle dummy_handle; | 465 | acpi_handle dummy_handle; |
514 | struct pci_bus *pci_bus; | ||
515 | 466 | ||
516 | /* if the bridge doesn't have _STA, we assume it is always there */ | 467 | /* if the bridge doesn't have _STA, we assume it is always there */ |
517 | status = acpi_get_handle(handle, "_STA", &dummy_handle); | 468 | status = acpi_get_handle(handle, "_STA", &dummy_handle); |
@@ -526,36 +477,15 @@ static int add_bridge(acpi_handle handle) | |||
526 | return 0; | 477 | return 0; |
527 | } | 478 | } |
528 | 479 | ||
529 | /* get PCI segment number */ | ||
530 | status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp); | ||
531 | |||
532 | seg = ACPI_SUCCESS(status) ? tmp : 0; | ||
533 | |||
534 | /* get PCI bus number */ | ||
535 | status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp); | ||
536 | |||
537 | if (ACPI_SUCCESS(status)) { | ||
538 | bus = tmp; | ||
539 | } else { | ||
540 | warn("can't get bus number, assuming 0\n"); | ||
541 | bus = 0; | ||
542 | } | ||
543 | |||
544 | pci_bus = pci_find_bus(seg, bus); | ||
545 | if (!pci_bus) { | ||
546 | err("Can't find bus %04x:%02x\n", seg, bus); | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | /* check if this bridge has ejectable slots */ | 480 | /* check if this bridge has ejectable slots */ |
551 | if (detect_ejectable_slots(pci_bus) > 0) { | 481 | if (detect_ejectable_slots(handle) > 0) { |
552 | dbg("found PCI host-bus bridge with hot-pluggable slots\n"); | 482 | dbg("found PCI host-bus bridge with hot-pluggable slots\n"); |
553 | add_host_bridge(handle, pci_bus); | 483 | add_host_bridge(handle); |
554 | } | 484 | } |
555 | 485 | ||
556 | /* search P2P bridges under this host bridge */ | 486 | /* search P2P bridges under this host bridge */ |
557 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 487 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
558 | find_p2p_bridge, pci_bus, NULL); | 488 | find_p2p_bridge, NULL, NULL); |
559 | 489 | ||
560 | if (ACPI_FAILURE(status)) | 490 | if (ACPI_FAILURE(status)) |
561 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); | 491 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); |
@@ -1083,7 +1013,7 @@ static int __ref enable_device(struct acpiphp_slot *slot) | |||
1083 | 1013 | ||
1084 | pci_bus_assign_resources(bus); | 1014 | pci_bus_assign_resources(bus); |
1085 | acpiphp_sanitize_bus(bus); | 1015 | acpiphp_sanitize_bus(bus); |
1086 | acpiphp_set_hpp_values(slot->bridge->handle, bus); | 1016 | acpiphp_set_hpp_values(bus); |
1087 | list_for_each_entry(func, &slot->funcs, sibling) | 1017 | list_for_each_entry(func, &slot->funcs, sibling) |
1088 | acpiphp_configure_ioapics(func->handle); | 1018 | acpiphp_configure_ioapics(func->handle); |
1089 | pci_enable_bridges(bus); | 1019 | pci_enable_bridges(bus); |
@@ -1294,70 +1224,12 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
1294 | return retval; | 1224 | return retval; |
1295 | } | 1225 | } |
1296 | 1226 | ||
1297 | static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge) | 1227 | static void acpiphp_set_hpp_values(struct pci_bus *bus) |
1298 | { | 1228 | { |
1299 | u16 pci_cmd, pci_bctl; | ||
1300 | struct pci_dev *cdev; | ||
1301 | |||
1302 | /* Program hpp values for this device */ | ||
1303 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
1304 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
1305 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
1306 | return; | ||
1307 | |||
1308 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) | ||
1309 | return; | ||
1310 | |||
1311 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, | ||
1312 | bridge->hpp.t0->cache_line_size); | ||
1313 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, | ||
1314 | bridge->hpp.t0->latency_timer); | ||
1315 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
1316 | if (bridge->hpp.t0->enable_serr) | ||
1317 | pci_cmd |= PCI_COMMAND_SERR; | ||
1318 | else | ||
1319 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
1320 | if (bridge->hpp.t0->enable_perr) | ||
1321 | pci_cmd |= PCI_COMMAND_PARITY; | ||
1322 | else | ||
1323 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
1324 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
1325 | |||
1326 | /* Program bridge control value and child devices */ | ||
1327 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
1328 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
1329 | bridge->hpp.t0->latency_timer); | ||
1330 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
1331 | if (bridge->hpp.t0->enable_serr) | ||
1332 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
1333 | else | ||
1334 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
1335 | if (bridge->hpp.t0->enable_perr) | ||
1336 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
1337 | else | ||
1338 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
1339 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
1340 | if (dev->subordinate) { | ||
1341 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
1342 | bus_list) | ||
1343 | program_hpp(cdev, bridge); | ||
1344 | } | ||
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus) | ||
1349 | { | ||
1350 | struct acpiphp_bridge bridge; | ||
1351 | struct pci_dev *dev; | 1229 | struct pci_dev *dev; |
1352 | 1230 | ||
1353 | memset(&bridge, 0, sizeof(bridge)); | ||
1354 | bridge.handle = handle; | ||
1355 | bridge.pci_bus = bus; | ||
1356 | bridge.pci_dev = bus->self; | ||
1357 | decode_hpp(&bridge); | ||
1358 | list_for_each_entry(dev, &bus->devices, bus_list) | 1231 | list_for_each_entry(dev, &bus->devices, bus_list) |
1359 | program_hpp(dev, &bridge); | 1232 | pci_configure_slot(dev); |
1360 | |||
1361 | } | 1233 | } |
1362 | 1234 | ||
1363 | /* | 1235 | /* |
@@ -1387,24 +1259,23 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) | |||
1387 | /* Program resources in newly inserted bridge */ | 1259 | /* Program resources in newly inserted bridge */ |
1388 | static int acpiphp_configure_bridge (acpi_handle handle) | 1260 | static int acpiphp_configure_bridge (acpi_handle handle) |
1389 | { | 1261 | { |
1390 | struct pci_dev *dev; | ||
1391 | struct pci_bus *bus; | 1262 | struct pci_bus *bus; |
1392 | 1263 | ||
1393 | dev = acpi_get_pci_dev(handle); | 1264 | if (acpi_is_root_bridge(handle)) { |
1394 | if (!dev) { | 1265 | struct acpi_pci_root *root = acpi_pci_find_root(handle); |
1395 | err("cannot get PCI domain and bus number for bridge\n"); | 1266 | bus = root->bus; |
1396 | return -EINVAL; | 1267 | } else { |
1268 | struct pci_dev *pdev = acpi_get_pci_dev(handle); | ||
1269 | bus = pdev->subordinate; | ||
1270 | pci_dev_put(pdev); | ||
1397 | } | 1271 | } |
1398 | 1272 | ||
1399 | bus = dev->bus; | ||
1400 | |||
1401 | pci_bus_size_bridges(bus); | 1273 | pci_bus_size_bridges(bus); |
1402 | pci_bus_assign_resources(bus); | 1274 | pci_bus_assign_resources(bus); |
1403 | acpiphp_sanitize_bus(bus); | 1275 | acpiphp_sanitize_bus(bus); |
1404 | acpiphp_set_hpp_values(handle, bus); | 1276 | acpiphp_set_hpp_values(bus); |
1405 | pci_enable_bridges(bus); | 1277 | pci_enable_bridges(bus); |
1406 | acpiphp_configure_ioapics(handle); | 1278 | acpiphp_configure_ioapics(handle); |
1407 | pci_dev_put(dev); | ||
1408 | return 0; | 1279 | return 0; |
1409 | } | 1280 | } |
1410 | 1281 | ||
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 5c5043f239cf..0325d989bb46 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c | |||
@@ -86,7 +86,8 @@ static char *pci_bus_speed_strings[] = { | |||
86 | "66 MHz PCIX 533", /* 0x11 */ | 86 | "66 MHz PCIX 533", /* 0x11 */ |
87 | "100 MHz PCIX 533", /* 0x12 */ | 87 | "100 MHz PCIX 533", /* 0x12 */ |
88 | "133 MHz PCIX 533", /* 0x13 */ | 88 | "133 MHz PCIX 533", /* 0x13 */ |
89 | "25 GBps PCI-E", /* 0x14 */ | 89 | "2.5 GT/s PCI-E", /* 0x14 */ |
90 | "5.0 GT/s PCI-E", /* 0x15 */ | ||
90 | }; | 91 | }; |
91 | 92 | ||
92 | #ifdef CONFIG_HOTPLUG_PCI_CPCI | 93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index e6cf096498be..36faa9a8e18f 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -237,17 +237,8 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) | |||
237 | return retval; | 237 | return retval; |
238 | return pciehp_acpi_slot_detection_check(dev); | 238 | return pciehp_acpi_slot_detection_check(dev); |
239 | } | 239 | } |
240 | |||
241 | static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, | ||
242 | struct hotplug_params *hpp) | ||
243 | { | ||
244 | if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp))) | ||
245 | return -ENODEV; | ||
246 | return 0; | ||
247 | } | ||
248 | #else | 240 | #else |
249 | #define pciehp_firmware_init() do {} while (0) | 241 | #define pciehp_firmware_init() do {} while (0) |
250 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 | 242 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 |
251 | #define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV) | ||
252 | #endif /* CONFIG_ACPI */ | 243 | #endif /* CONFIG_ACPI */ |
253 | #endif /* _PCIEHP_H */ | 244 | #endif /* _PCIEHP_H */ |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index 96048010e7d9..7163e6a6cfae 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c | |||
@@ -47,7 +47,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev) | |||
47 | { | 47 | { |
48 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) | 48 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) |
49 | return 0; | 49 | return 0; |
50 | if (acpi_pci_detect_ejectable(dev->subordinate)) | 50 | if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) |
51 | return 0; | 51 | return 0; |
52 | return -ENODEV; | 52 | return -ENODEV; |
53 | } | 53 | } |
@@ -76,9 +76,9 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
76 | { | 76 | { |
77 | int pos; | 77 | int pos; |
78 | u32 slot_cap; | 78 | u32 slot_cap; |
79 | acpi_handle handle; | ||
79 | struct slot *slot, *tmp; | 80 | struct slot *slot, *tmp; |
80 | struct pci_dev *pdev = dev->port; | 81 | struct pci_dev *pdev = dev->port; |
81 | struct pci_bus *pbus = pdev->subordinate; | ||
82 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 82 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ |
83 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | 83 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) |
84 | return -ENODEV; | 84 | return -ENODEV; |
@@ -94,7 +94,8 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
94 | dup_slot_id++; | 94 | dup_slot_id++; |
95 | } | 95 | } |
96 | list_add_tail(&slot->slot_list, &dummy_slots); | 96 | list_add_tail(&slot->slot_list, &dummy_slots); |
97 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus)) | 97 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); |
98 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) | ||
98 | acpi_slot_detected = 1; | 99 | acpi_slot_detected = 1; |
99 | return -ENODEV; /* dummy driver always returns error */ | 100 | return -ENODEV; /* dummy driver always returns error */ |
100 | } | 101 | } |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 8aab8edf123e..b97cb4c3e0fe 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -246,11 +246,6 @@ static int board_added(struct slot *p_slot) | |||
246 | goto err_exit; | 246 | goto err_exit; |
247 | } | 247 | } |
248 | 248 | ||
249 | /* | ||
250 | * Some PCI Express root ports require fixup after hot-plug operation. | ||
251 | */ | ||
252 | if (pcie_mch_quirk) | ||
253 | pci_fixup_device(pci_fixup_final, ctrl->pci_dev); | ||
254 | if (PWR_LED(ctrl)) | 249 | if (PWR_LED(ctrl)) |
255 | p_slot->hpc_ops->green_led_on(p_slot); | 250 | p_slot->hpc_ops->green_led_on(p_slot); |
256 | 251 | ||
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 52813257e5bf..271f917b6f2c 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -693,7 +693,10 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
693 | 693 | ||
694 | switch (lnk_cap & 0x000F) { | 694 | switch (lnk_cap & 0x000F) { |
695 | case 1: | 695 | case 1: |
696 | lnk_speed = PCIE_2PT5GB; | 696 | lnk_speed = PCIE_2_5GB; |
697 | break; | ||
698 | case 2: | ||
699 | lnk_speed = PCIE_5_0GB; | ||
697 | break; | 700 | break; |
698 | default: | 701 | default: |
699 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | 702 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; |
@@ -772,7 +775,10 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
772 | 775 | ||
773 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { | 776 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { |
774 | case 1: | 777 | case 1: |
775 | lnk_speed = PCIE_2PT5GB; | 778 | lnk_speed = PCIE_2_5GB; |
779 | break; | ||
780 | case 2: | ||
781 | lnk_speed = PCIE_5_0GB; | ||
776 | break; | 782 | break; |
777 | default: | 783 | default: |
778 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | 784 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; |
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 10f9566cceeb..02e24d63b3ee 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -34,136 +34,6 @@ | |||
34 | #include "../pci.h" | 34 | #include "../pci.h" |
35 | #include "pciehp.h" | 35 | #include "pciehp.h" |
36 | 36 | ||
37 | static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | ||
38 | { | ||
39 | u16 pci_cmd, pci_bctl; | ||
40 | |||
41 | if (hpp->revision > 1) { | ||
42 | warn("Rev.%d type0 record not supported\n", hpp->revision); | ||
43 | return; | ||
44 | } | ||
45 | |||
46 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); | ||
47 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); | ||
48 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
49 | if (hpp->enable_serr) | ||
50 | pci_cmd |= PCI_COMMAND_SERR; | ||
51 | else | ||
52 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
53 | if (hpp->enable_perr) | ||
54 | pci_cmd |= PCI_COMMAND_PARITY; | ||
55 | else | ||
56 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
57 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
58 | |||
59 | /* Program bridge control value */ | ||
60 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
61 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
62 | hpp->latency_timer); | ||
63 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
64 | if (hpp->enable_serr) | ||
65 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
66 | else | ||
67 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
68 | if (hpp->enable_perr) | ||
69 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
70 | else | ||
71 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
72 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | ||
77 | { | ||
78 | int pos; | ||
79 | u16 reg16; | ||
80 | u32 reg32; | ||
81 | |||
82 | if (hpp->revision > 1) { | ||
83 | warn("Rev.%d type2 record not supported\n", hpp->revision); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | /* Find PCI Express capability */ | ||
88 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
89 | if (!pos) | ||
90 | return; | ||
91 | |||
92 | /* Initialize Device Control Register */ | ||
93 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); | ||
94 | reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; | ||
95 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); | ||
96 | |||
97 | /* Initialize Link Control Register */ | ||
98 | if (dev->subordinate) { | ||
99 | pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, ®16); | ||
100 | reg16 = (reg16 & hpp->pci_exp_lnkctl_and) | ||
101 | | hpp->pci_exp_lnkctl_or; | ||
102 | pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16); | ||
103 | } | ||
104 | |||
105 | /* Find Advanced Error Reporting Enhanced Capability */ | ||
106 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
107 | if (!pos) | ||
108 | return; | ||
109 | |||
110 | /* Initialize Uncorrectable Error Mask Register */ | ||
111 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); | ||
112 | reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; | ||
113 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); | ||
114 | |||
115 | /* Initialize Uncorrectable Error Severity Register */ | ||
116 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); | ||
117 | reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; | ||
118 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); | ||
119 | |||
120 | /* Initialize Correctable Error Mask Register */ | ||
121 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); | ||
122 | reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; | ||
123 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); | ||
124 | |||
125 | /* Initialize Advanced Error Capabilities and Control Register */ | ||
126 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | ||
127 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; | ||
128 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | ||
129 | |||
130 | /* | ||
131 | * FIXME: The following two registers are not supported yet. | ||
132 | * | ||
133 | * o Secondary Uncorrectable Error Severity Register | ||
134 | * o Secondary Uncorrectable Error Mask Register | ||
135 | */ | ||
136 | } | ||
137 | |||
138 | static void program_fw_provided_values(struct pci_dev *dev) | ||
139 | { | ||
140 | struct pci_dev *cdev; | ||
141 | struct hotplug_params hpp; | ||
142 | |||
143 | /* Program hpp values for this device */ | ||
144 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
145 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
146 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
147 | return; | ||
148 | |||
149 | if (pciehp_get_hp_params_from_firmware(dev, &hpp)) { | ||
150 | warn("Could not get hotplug parameters\n"); | ||
151 | return; | ||
152 | } | ||
153 | |||
154 | if (hpp.t2) | ||
155 | program_hpp_type2(dev, hpp.t2); | ||
156 | if (hpp.t0) | ||
157 | program_hpp_type0(dev, hpp.t0); | ||
158 | |||
159 | /* Program child devices */ | ||
160 | if (dev->subordinate) { | ||
161 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
162 | bus_list) | ||
163 | program_fw_provided_values(cdev); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static int __ref pciehp_add_bridge(struct pci_dev *dev) | 37 | static int __ref pciehp_add_bridge(struct pci_dev *dev) |
168 | { | 38 | { |
169 | struct pci_bus *parent = dev->bus; | 39 | struct pci_bus *parent = dev->bus; |
@@ -226,7 +96,7 @@ int pciehp_configure_device(struct slot *p_slot) | |||
226 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { | 96 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { |
227 | pciehp_add_bridge(dev); | 97 | pciehp_add_bridge(dev); |
228 | } | 98 | } |
229 | program_fw_provided_values(dev); | 99 | pci_configure_slot(dev); |
230 | pci_dev_put(dev); | 100 | pci_dev_put(dev); |
231 | } | 101 | } |
232 | 102 | ||
@@ -285,11 +155,6 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
285 | } | 155 | } |
286 | pci_dev_put(temp); | 156 | pci_dev_put(temp); |
287 | } | 157 | } |
288 | /* | ||
289 | * Some PCI Express root ports require fixup after hot-plug operation. | ||
290 | */ | ||
291 | if (pcie_mch_quirk) | ||
292 | pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev); | ||
293 | 158 | ||
294 | return rc; | 159 | return rc; |
295 | } | 160 | } |
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c new file mode 100644 index 000000000000..cc8ec3aa41a7 --- /dev/null +++ b/drivers/pci/hotplug/pcihp_slot.c | |||
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1995,2001 Compaq Computer Corporation | ||
3 | * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) | ||
4 | * Copyright (C) 2001 IBM Corp. | ||
5 | * Copyright (C) 2003-2004 Intel Corporation | ||
6 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or (at | ||
13 | * your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/pci.h> | ||
27 | #include <linux/pci_hotplug.h> | ||
28 | |||
29 | static struct hpp_type0 pci_default_type0 = { | ||
30 | .revision = 1, | ||
31 | .cache_line_size = 8, | ||
32 | .latency_timer = 0x40, | ||
33 | .enable_serr = 0, | ||
34 | .enable_perr = 0, | ||
35 | }; | ||
36 | |||
37 | static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | ||
38 | { | ||
39 | u16 pci_cmd, pci_bctl; | ||
40 | |||
41 | if (!hpp) { | ||
42 | /* | ||
43 | * Perhaps we *should* use default settings for PCIe, but | ||
44 | * pciehp didn't, so we won't either. | ||
45 | */ | ||
46 | if (dev->is_pcie) | ||
47 | return; | ||
48 | dev_info(&dev->dev, "using default PCI settings\n"); | ||
49 | hpp = &pci_default_type0; | ||
50 | } | ||
51 | |||
52 | if (hpp->revision > 1) { | ||
53 | dev_warn(&dev->dev, | ||
54 | "PCI settings rev %d not supported; using defaults\n", | ||
55 | hpp->revision); | ||
56 | hpp = &pci_default_type0; | ||
57 | } | ||
58 | |||
59 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); | ||
60 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); | ||
61 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
62 | if (hpp->enable_serr) | ||
63 | pci_cmd |= PCI_COMMAND_SERR; | ||
64 | else | ||
65 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
66 | if (hpp->enable_perr) | ||
67 | pci_cmd |= PCI_COMMAND_PARITY; | ||
68 | else | ||
69 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
70 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
71 | |||
72 | /* Program bridge control value */ | ||
73 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
74 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
75 | hpp->latency_timer); | ||
76 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
77 | if (hpp->enable_serr) | ||
78 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
79 | else | ||
80 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
81 | if (hpp->enable_perr) | ||
82 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
83 | else | ||
84 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
85 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) | ||
90 | { | ||
91 | if (hpp) | ||
92 | dev_warn(&dev->dev, "PCI-X settings not supported\n"); | ||
93 | } | ||
94 | |||
95 | static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | ||
96 | { | ||
97 | int pos; | ||
98 | u16 reg16; | ||
99 | u32 reg32; | ||
100 | |||
101 | if (!hpp) | ||
102 | return; | ||
103 | |||
104 | /* Find PCI Express capability */ | ||
105 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
106 | if (!pos) | ||
107 | return; | ||
108 | |||
109 | if (hpp->revision > 1) { | ||
110 | dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", | ||
111 | hpp->revision); | ||
112 | return; | ||
113 | } | ||
114 | |||
115 | /* Initialize Device Control Register */ | ||
116 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); | ||
117 | reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; | ||
118 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); | ||
119 | |||
120 | /* Initialize Link Control Register */ | ||
121 | if (dev->subordinate) { | ||
122 | pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, ®16); | ||
123 | reg16 = (reg16 & hpp->pci_exp_lnkctl_and) | ||
124 | | hpp->pci_exp_lnkctl_or; | ||
125 | pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16); | ||
126 | } | ||
127 | |||
128 | /* Find Advanced Error Reporting Enhanced Capability */ | ||
129 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
130 | if (!pos) | ||
131 | return; | ||
132 | |||
133 | /* Initialize Uncorrectable Error Mask Register */ | ||
134 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); | ||
135 | reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; | ||
136 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); | ||
137 | |||
138 | /* Initialize Uncorrectable Error Severity Register */ | ||
139 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); | ||
140 | reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; | ||
141 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); | ||
142 | |||
143 | /* Initialize Correctable Error Mask Register */ | ||
144 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); | ||
145 | reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; | ||
146 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); | ||
147 | |||
148 | /* Initialize Advanced Error Capabilities and Control Register */ | ||
149 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | ||
150 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; | ||
151 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | ||
152 | |||
153 | /* | ||
154 | * FIXME: The following two registers are not supported yet. | ||
155 | * | ||
156 | * o Secondary Uncorrectable Error Severity Register | ||
157 | * o Secondary Uncorrectable Error Mask Register | ||
158 | */ | ||
159 | } | ||
160 | |||
161 | void pci_configure_slot(struct pci_dev *dev) | ||
162 | { | ||
163 | struct pci_dev *cdev; | ||
164 | struct hotplug_params hpp; | ||
165 | int ret; | ||
166 | |||
167 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
168 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
169 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
170 | return; | ||
171 | |||
172 | memset(&hpp, 0, sizeof(hpp)); | ||
173 | ret = pci_get_hp_params(dev, &hpp); | ||
174 | if (ret) | ||
175 | dev_warn(&dev->dev, "no hotplug settings from platform\n"); | ||
176 | |||
177 | program_hpp_type2(dev, hpp.t2); | ||
178 | program_hpp_type1(dev, hpp.t1); | ||
179 | program_hpp_type0(dev, hpp.t0); | ||
180 | |||
181 | if (dev->subordinate) { | ||
182 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
183 | bus_list) | ||
184 | pci_configure_slot(cdev); | ||
185 | } | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(pci_configure_slot); | ||
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 974e924ca96d..bd588eb8e922 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -188,21 +188,12 @@ static inline const char *slot_name(struct slot *slot) | |||
188 | 188 | ||
189 | #ifdef CONFIG_ACPI | 189 | #ifdef CONFIG_ACPI |
190 | #include <linux/pci-acpi.h> | 190 | #include <linux/pci-acpi.h> |
191 | static inline int get_hp_params_from_firmware(struct pci_dev *dev, | ||
192 | struct hotplug_params *hpp) | ||
193 | { | ||
194 | if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp))) | ||
195 | return -ENODEV; | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) | 191 | static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) |
200 | { | 192 | { |
201 | u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; | 193 | u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; |
202 | return acpi_get_hp_hw_control_from_firmware(dev, flags); | 194 | return acpi_get_hp_hw_control_from_firmware(dev, flags); |
203 | } | 195 | } |
204 | #else | 196 | #else |
205 | #define get_hp_params_from_firmware(dev, hpp) (-ENODEV) | ||
206 | #define get_hp_hw_control_from_firmware(dev) (0) | 197 | #define get_hp_hw_control_from_firmware(dev) (0) |
207 | #endif | 198 | #endif |
208 | 199 | ||
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index aa315e52529b..8c3d3219f227 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -34,66 +34,6 @@ | |||
34 | #include "../pci.h" | 34 | #include "../pci.h" |
35 | #include "shpchp.h" | 35 | #include "shpchp.h" |
36 | 36 | ||
37 | static void program_fw_provided_values(struct pci_dev *dev) | ||
38 | { | ||
39 | u16 pci_cmd, pci_bctl; | ||
40 | struct pci_dev *cdev; | ||
41 | struct hotplug_params hpp; | ||
42 | |||
43 | /* Program hpp values for this device */ | ||
44 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
45 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
46 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
47 | return; | ||
48 | |||
49 | /* use default values if we can't get them from firmware */ | ||
50 | if (get_hp_params_from_firmware(dev, &hpp) || | ||
51 | !hpp.t0 || (hpp.t0->revision > 1)) { | ||
52 | warn("Could not get hotplug parameters. Use defaults\n"); | ||
53 | hpp.t0 = &hpp.type0_data; | ||
54 | hpp.t0->revision = 0; | ||
55 | hpp.t0->cache_line_size = 8; | ||
56 | hpp.t0->latency_timer = 0x40; | ||
57 | hpp.t0->enable_serr = 0; | ||
58 | hpp.t0->enable_perr = 0; | ||
59 | } | ||
60 | |||
61 | pci_write_config_byte(dev, | ||
62 | PCI_CACHE_LINE_SIZE, hpp.t0->cache_line_size); | ||
63 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp.t0->latency_timer); | ||
64 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
65 | if (hpp.t0->enable_serr) | ||
66 | pci_cmd |= PCI_COMMAND_SERR; | ||
67 | else | ||
68 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
69 | if (hpp.t0->enable_perr) | ||
70 | pci_cmd |= PCI_COMMAND_PARITY; | ||
71 | else | ||
72 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
73 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
74 | |||
75 | /* Program bridge control value and child devices */ | ||
76 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
77 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
78 | hpp.t0->latency_timer); | ||
79 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
80 | if (hpp.t0->enable_serr) | ||
81 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
82 | else | ||
83 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
84 | if (hpp.t0->enable_perr) | ||
85 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
86 | else | ||
87 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
88 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
89 | if (dev->subordinate) { | ||
90 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
91 | bus_list) | ||
92 | program_fw_provided_values(cdev); | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | |||
97 | int __ref shpchp_configure_device(struct slot *p_slot) | 37 | int __ref shpchp_configure_device(struct slot *p_slot) |
98 | { | 38 | { |
99 | struct pci_dev *dev; | 39 | struct pci_dev *dev; |
@@ -153,7 +93,7 @@ int __ref shpchp_configure_device(struct slot *p_slot) | |||
153 | child->subordinate = pci_do_scan_bus(child); | 93 | child->subordinate = pci_do_scan_bus(child); |
154 | pci_bus_size_bridges(child); | 94 | pci_bus_size_bridges(child); |
155 | } | 95 | } |
156 | program_fw_provided_values(dev); | 96 | pci_configure_slot(dev); |
157 | pci_dev_put(dev); | 97 | pci_dev_put(dev); |
158 | } | 98 | } |
159 | 99 | ||
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c new file mode 100644 index 000000000000..871f65c15936 --- /dev/null +++ b/drivers/pci/legacy.c | |||
@@ -0,0 +1,34 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/pci.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/interrupt.h> | ||
5 | #include "pci.h" | ||
6 | |||
7 | /** | ||
8 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
9 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
10 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
11 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
12 | * | ||
13 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
14 | * with a matching @vendor and @device, a pointer to its device structure is | ||
15 | * returned. Otherwise, %NULL is returned. | ||
16 | * A new search is initiated by passing %NULL as the @from argument. | ||
17 | * Otherwise if @from is not %NULL, searches continue from next device | ||
18 | * on the global list. | ||
19 | * | ||
20 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
21 | * the PCI device returned by this function can disappear at any moment in | ||
22 | * time. | ||
23 | */ | ||
24 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
25 | struct pci_dev *from) | ||
26 | { | ||
27 | struct pci_dev *pdev; | ||
28 | |||
29 | pci_dev_get(from); | ||
30 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
31 | pci_dev_put(pdev); | ||
32 | return pdev; | ||
33 | } | ||
34 | EXPORT_SYMBOL(pci_find_device); | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d986afb7032b..f9cf3173b23d 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -16,9 +16,8 @@ | |||
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/msi.h> | 17 | #include <linux/msi.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | 19 | #include <linux/errno.h> | |
20 | #include <asm/errno.h> | 20 | #include <linux/io.h> |
21 | #include <asm/io.h> | ||
22 | 21 | ||
23 | #include "pci.h" | 22 | #include "pci.h" |
24 | #include "msi.h" | 23 | #include "msi.h" |
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg) | |||
272 | write_msi_msg_desc(desc, msg); | 271 | write_msi_msg_desc(desc, msg); |
273 | } | 272 | } |
274 | 273 | ||
275 | static int msi_free_irqs(struct pci_dev* dev); | 274 | static void free_msi_irqs(struct pci_dev *dev) |
275 | { | ||
276 | struct msi_desc *entry, *tmp; | ||
277 | |||
278 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
279 | int i, nvec; | ||
280 | if (!entry->irq) | ||
281 | continue; | ||
282 | nvec = 1 << entry->msi_attrib.multiple; | ||
283 | for (i = 0; i < nvec; i++) | ||
284 | BUG_ON(irq_has_action(entry->irq + i)); | ||
285 | } | ||
286 | |||
287 | arch_teardown_msi_irqs(dev); | ||
288 | |||
289 | list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { | ||
290 | if (entry->msi_attrib.is_msix) { | ||
291 | if (list_is_last(&entry->list, &dev->msi_list)) | ||
292 | iounmap(entry->mask_base); | ||
293 | } | ||
294 | list_del(&entry->list); | ||
295 | kfree(entry); | ||
296 | } | ||
297 | } | ||
276 | 298 | ||
277 | static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) | 299 | static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) |
278 | { | 300 | { |
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
324 | if (!dev->msix_enabled) | 346 | if (!dev->msix_enabled) |
325 | return; | 347 | return; |
326 | BUG_ON(list_empty(&dev->msi_list)); | 348 | BUG_ON(list_empty(&dev->msi_list)); |
327 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | 349 | entry = list_first_entry(&dev->msi_list, struct msi_desc, list); |
328 | pos = entry->msi_attrib.pos; | 350 | pos = entry->msi_attrib.pos; |
329 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | 351 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); |
330 | 352 | ||
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
367 | u16 control; | 389 | u16 control; |
368 | unsigned mask; | 390 | unsigned mask; |
369 | 391 | ||
370 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 392 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
371 | msi_set_enable(dev, pos, 0); /* Disable MSI during set up */ | 393 | msi_set_enable(dev, pos, 0); /* Disable MSI during set up */ |
372 | 394 | ||
373 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 395 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
376 | if (!entry) | 398 | if (!entry) |
377 | return -ENOMEM; | 399 | return -ENOMEM; |
378 | 400 | ||
379 | entry->msi_attrib.is_msix = 0; | 401 | entry->msi_attrib.is_msix = 0; |
380 | entry->msi_attrib.is_64 = is_64bit_address(control); | 402 | entry->msi_attrib.is_64 = is_64bit_address(control); |
381 | entry->msi_attrib.entry_nr = 0; | 403 | entry->msi_attrib.entry_nr = 0; |
382 | entry->msi_attrib.maskbit = is_mask_bit_support(control); | 404 | entry->msi_attrib.maskbit = is_mask_bit_support(control); |
383 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ | 405 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ |
384 | entry->msi_attrib.pos = pos; | 406 | entry->msi_attrib.pos = pos; |
385 | 407 | ||
386 | entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64); | 408 | entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64); |
387 | /* All MSIs are unmasked by default, Mask them all */ | 409 | /* All MSIs are unmasked by default, Mask them all */ |
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
396 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); | 418 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); |
397 | if (ret) { | 419 | if (ret) { |
398 | msi_mask_irq(entry, mask, ~mask); | 420 | msi_mask_irq(entry, mask, ~mask); |
399 | msi_free_irqs(dev); | 421 | free_msi_irqs(dev); |
400 | return ret; | 422 | return ret; |
401 | } | 423 | } |
402 | 424 | ||
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
409 | return 0; | 431 | return 0; |
410 | } | 432 | } |
411 | 433 | ||
412 | /** | 434 | static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos, |
413 | * msix_capability_init - configure device's MSI-X capability | 435 | unsigned nr_entries) |
414 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
415 | * @entries: pointer to an array of struct msix_entry entries | ||
416 | * @nvec: number of @entries | ||
417 | * | ||
418 | * Setup the MSI-X capability structure of device function with a | ||
419 | * single MSI-X irq. A return of zero indicates the successful setup of | ||
420 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. | ||
421 | **/ | ||
422 | static int msix_capability_init(struct pci_dev *dev, | ||
423 | struct msix_entry *entries, int nvec) | ||
424 | { | 436 | { |
425 | struct msi_desc *entry; | ||
426 | int pos, i, j, nr_entries, ret; | ||
427 | unsigned long phys_addr; | 437 | unsigned long phys_addr; |
428 | u32 table_offset; | 438 | u32 table_offset; |
429 | u16 control; | ||
430 | u8 bir; | 439 | u8 bir; |
431 | void __iomem *base; | ||
432 | 440 | ||
433 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 441 | pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); |
434 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
435 | |||
436 | /* Ensure MSI-X is disabled while it is set up */ | ||
437 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
438 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
439 | |||
440 | /* Request & Map MSI-X table region */ | ||
441 | nr_entries = multi_msix_capable(control); | ||
442 | |||
443 | pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); | ||
444 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | 442 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); |
445 | table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; | 443 | table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; |
446 | phys_addr = pci_resource_start (dev, bir) + table_offset; | 444 | phys_addr = pci_resource_start(dev, bir) + table_offset; |
447 | base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); | 445 | |
448 | if (base == NULL) | 446 | return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); |
449 | return -ENOMEM; | 447 | } |
448 | |||
449 | static int msix_setup_entries(struct pci_dev *dev, unsigned pos, | ||
450 | void __iomem *base, struct msix_entry *entries, | ||
451 | int nvec) | ||
452 | { | ||
453 | struct msi_desc *entry; | ||
454 | int i; | ||
450 | 455 | ||
451 | for (i = 0; i < nvec; i++) { | 456 | for (i = 0; i < nvec; i++) { |
452 | entry = alloc_msi_entry(dev); | 457 | entry = alloc_msi_entry(dev); |
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev, | |||
454 | if (!i) | 459 | if (!i) |
455 | iounmap(base); | 460 | iounmap(base); |
456 | else | 461 | else |
457 | msi_free_irqs(dev); | 462 | free_msi_irqs(dev); |
458 | /* No enough memory. Don't try again */ | 463 | /* No enough memory. Don't try again */ |
459 | return -ENOMEM; | 464 | return -ENOMEM; |
460 | } | 465 | } |
461 | 466 | ||
462 | j = entries[i].entry; | 467 | entry->msi_attrib.is_msix = 1; |
463 | entry->msi_attrib.is_msix = 1; | 468 | entry->msi_attrib.is_64 = 1; |
464 | entry->msi_attrib.is_64 = 1; | 469 | entry->msi_attrib.entry_nr = entries[i].entry; |
465 | entry->msi_attrib.entry_nr = j; | 470 | entry->msi_attrib.default_irq = dev->irq; |
466 | entry->msi_attrib.default_irq = dev->irq; | 471 | entry->msi_attrib.pos = pos; |
467 | entry->msi_attrib.pos = pos; | 472 | entry->mask_base = base; |
468 | entry->mask_base = base; | ||
469 | 473 | ||
470 | list_add_tail(&entry->list, &dev->msi_list); | 474 | list_add_tail(&entry->list, &dev->msi_list); |
471 | } | 475 | } |
472 | 476 | ||
473 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); | 477 | return 0; |
474 | if (ret < 0) { | 478 | } |
475 | /* If we had some success report the number of irqs | ||
476 | * we succeeded in setting up. */ | ||
477 | int avail = 0; | ||
478 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
479 | if (entry->irq != 0) { | ||
480 | avail++; | ||
481 | } | ||
482 | } | ||
483 | 479 | ||
484 | if (avail != 0) | 480 | static void msix_program_entries(struct pci_dev *dev, |
485 | ret = avail; | 481 | struct msix_entry *entries) |
482 | { | ||
483 | struct msi_desc *entry; | ||
484 | int i = 0; | ||
485 | |||
486 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
487 | int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + | ||
488 | PCI_MSIX_ENTRY_VECTOR_CTRL; | ||
489 | |||
490 | entries[i].vector = entry->irq; | ||
491 | set_irq_msi(entry->irq, entry); | ||
492 | entry->masked = readl(entry->mask_base + offset); | ||
493 | msix_mask_irq(entry, 1); | ||
494 | i++; | ||
486 | } | 495 | } |
496 | } | ||
487 | 497 | ||
488 | if (ret) { | 498 | /** |
489 | msi_free_irqs(dev); | 499 | * msix_capability_init - configure device's MSI-X capability |
500 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
501 | * @entries: pointer to an array of struct msix_entry entries | ||
502 | * @nvec: number of @entries | ||
503 | * | ||
504 | * Setup the MSI-X capability structure of device function with a | ||
505 | * single MSI-X irq. A return of zero indicates the successful setup of | ||
506 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. | ||
507 | **/ | ||
508 | static int msix_capability_init(struct pci_dev *dev, | ||
509 | struct msix_entry *entries, int nvec) | ||
510 | { | ||
511 | int pos, ret; | ||
512 | u16 control; | ||
513 | void __iomem *base; | ||
514 | |||
515 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
516 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
517 | |||
518 | /* Ensure MSI-X is disabled while it is set up */ | ||
519 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
520 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
521 | |||
522 | /* Request & Map MSI-X table region */ | ||
523 | base = msix_map_region(dev, pos, multi_msix_capable(control)); | ||
524 | if (!base) | ||
525 | return -ENOMEM; | ||
526 | |||
527 | ret = msix_setup_entries(dev, pos, base, entries, nvec); | ||
528 | if (ret) | ||
490 | return ret; | 529 | return ret; |
491 | } | 530 | |
531 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); | ||
532 | if (ret) | ||
533 | goto error; | ||
492 | 534 | ||
493 | /* | 535 | /* |
494 | * Some devices require MSI-X to be enabled before we can touch the | 536 | * Some devices require MSI-X to be enabled before we can touch the |
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
498 | control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; | 540 | control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; |
499 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 541 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
500 | 542 | ||
501 | i = 0; | 543 | msix_program_entries(dev, entries); |
502 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
503 | entries[i].vector = entry->irq; | ||
504 | set_irq_msi(entry->irq, entry); | ||
505 | j = entries[i].entry; | ||
506 | entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE + | ||
507 | PCI_MSIX_ENTRY_VECTOR_CTRL); | ||
508 | msix_mask_irq(entry, 1); | ||
509 | i++; | ||
510 | } | ||
511 | 544 | ||
512 | /* Set MSI-X enabled bits and unmask the function */ | 545 | /* Set MSI-X enabled bits and unmask the function */ |
513 | pci_intx_for_msi(dev, 0); | 546 | pci_intx_for_msi(dev, 0); |
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev, | |||
517 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 550 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
518 | 551 | ||
519 | return 0; | 552 | return 0; |
553 | |||
554 | error: | ||
555 | if (ret < 0) { | ||
556 | /* | ||
557 | * If we had some success, report the number of irqs | ||
558 | * we succeeded in setting up. | ||
559 | */ | ||
560 | struct msi_desc *entry; | ||
561 | int avail = 0; | ||
562 | |||
563 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
564 | if (entry->irq != 0) | ||
565 | avail++; | ||
566 | } | ||
567 | if (avail != 0) | ||
568 | ret = avail; | ||
569 | } | ||
570 | |||
571 | free_msi_irqs(dev); | ||
572 | |||
573 | return ret; | ||
520 | } | 574 | } |
521 | 575 | ||
522 | /** | 576 | /** |
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
529 | * to determine if MSI/-X are supported for the device. If MSI/-X is | 583 | * to determine if MSI/-X are supported for the device. If MSI/-X is |
530 | * supported return 0, else return an error code. | 584 | * supported return 0, else return an error code. |
531 | **/ | 585 | **/ |
532 | static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) | 586 | static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type) |
533 | { | 587 | { |
534 | struct pci_bus *bus; | 588 | struct pci_bus *bus; |
535 | int ret; | 589 | int ret; |
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) | |||
546 | if (nvec < 1) | 600 | if (nvec < 1) |
547 | return -ERANGE; | 601 | return -ERANGE; |
548 | 602 | ||
549 | /* Any bridge which does NOT route MSI transactions from it's | 603 | /* |
550 | * secondary bus to it's primary bus must set NO_MSI flag on | 604 | * Any bridge which does NOT route MSI transactions from its |
605 | * secondary bus to its primary bus must set NO_MSI flag on | ||
551 | * the secondary pci_bus. | 606 | * the secondary pci_bus. |
552 | * We expect only arch-specific PCI host bus controller driver | 607 | * We expect only arch-specific PCI host bus controller driver |
553 | * or quirks for specific PCI bridges to be setting NO_MSI. | 608 | * or quirks for specific PCI bridges to be setting NO_MSI. |
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev) | |||
638 | dev->irq = desc->msi_attrib.default_irq; | 693 | dev->irq = desc->msi_attrib.default_irq; |
639 | } | 694 | } |
640 | 695 | ||
641 | void pci_disable_msi(struct pci_dev* dev) | 696 | void pci_disable_msi(struct pci_dev *dev) |
642 | { | 697 | { |
643 | struct msi_desc *entry; | ||
644 | |||
645 | if (!pci_msi_enable || !dev || !dev->msi_enabled) | 698 | if (!pci_msi_enable || !dev || !dev->msi_enabled) |
646 | return; | 699 | return; |
647 | 700 | ||
648 | pci_msi_shutdown(dev); | 701 | pci_msi_shutdown(dev); |
649 | 702 | free_msi_irqs(dev); | |
650 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | ||
651 | if (entry->msi_attrib.is_msix) | ||
652 | return; | ||
653 | |||
654 | msi_free_irqs(dev); | ||
655 | } | 703 | } |
656 | EXPORT_SYMBOL(pci_disable_msi); | 704 | EXPORT_SYMBOL(pci_disable_msi); |
657 | 705 | ||
658 | static int msi_free_irqs(struct pci_dev* dev) | ||
659 | { | ||
660 | struct msi_desc *entry, *tmp; | ||
661 | |||
662 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
663 | int i, nvec; | ||
664 | if (!entry->irq) | ||
665 | continue; | ||
666 | nvec = 1 << entry->msi_attrib.multiple; | ||
667 | for (i = 0; i < nvec; i++) | ||
668 | BUG_ON(irq_has_action(entry->irq + i)); | ||
669 | } | ||
670 | |||
671 | arch_teardown_msi_irqs(dev); | ||
672 | |||
673 | list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { | ||
674 | if (entry->msi_attrib.is_msix) { | ||
675 | if (list_is_last(&entry->list, &dev->msi_list)) | ||
676 | iounmap(entry->mask_base); | ||
677 | } | ||
678 | list_del(&entry->list); | ||
679 | kfree(entry); | ||
680 | } | ||
681 | |||
682 | return 0; | ||
683 | } | ||
684 | |||
685 | /** | 706 | /** |
686 | * pci_msix_table_size - return the number of device's MSI-X table entries | 707 | * pci_msix_table_size - return the number of device's MSI-X table entries |
687 | * @dev: pointer to the pci_dev data structure of MSI-X device function | 708 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev) | |||
714 | * of irqs or MSI-X vectors available. Driver should use the returned value to | 735 | * of irqs or MSI-X vectors available. Driver should use the returned value to |
715 | * re-send its request. | 736 | * re-send its request. |
716 | **/ | 737 | **/ |
717 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | 738 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) |
718 | { | 739 | { |
719 | int status, nr_entries; | 740 | int status, nr_entries; |
720 | int i, j; | 741 | int i, j; |
721 | 742 | ||
722 | if (!entries) | 743 | if (!entries) |
723 | return -EINVAL; | 744 | return -EINVAL; |
724 | 745 | ||
725 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); | 746 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); |
726 | if (status) | 747 | if (status) |
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
742 | WARN_ON(!!dev->msix_enabled); | 763 | WARN_ON(!!dev->msix_enabled); |
743 | 764 | ||
744 | /* Check whether driver already requested for MSI irq */ | 765 | /* Check whether driver already requested for MSI irq */ |
745 | if (dev->msi_enabled) { | 766 | if (dev->msi_enabled) { |
746 | dev_info(&dev->dev, "can't enable MSI-X " | 767 | dev_info(&dev->dev, "can't enable MSI-X " |
747 | "(MSI IRQ already assigned)\n"); | 768 | "(MSI IRQ already assigned)\n"); |
748 | return -EINVAL; | 769 | return -EINVAL; |
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
752 | } | 773 | } |
753 | EXPORT_SYMBOL(pci_enable_msix); | 774 | EXPORT_SYMBOL(pci_enable_msix); |
754 | 775 | ||
755 | static void msix_free_all_irqs(struct pci_dev *dev) | 776 | void pci_msix_shutdown(struct pci_dev *dev) |
756 | { | ||
757 | msi_free_irqs(dev); | ||
758 | } | ||
759 | |||
760 | void pci_msix_shutdown(struct pci_dev* dev) | ||
761 | { | 777 | { |
762 | struct msi_desc *entry; | 778 | struct msi_desc *entry; |
763 | 779 | ||
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev) | |||
774 | pci_intx_for_msi(dev, 1); | 790 | pci_intx_for_msi(dev, 1); |
775 | dev->msix_enabled = 0; | 791 | dev->msix_enabled = 0; |
776 | } | 792 | } |
777 | void pci_disable_msix(struct pci_dev* dev) | 793 | |
794 | void pci_disable_msix(struct pci_dev *dev) | ||
778 | { | 795 | { |
779 | if (!pci_msi_enable || !dev || !dev->msix_enabled) | 796 | if (!pci_msi_enable || !dev || !dev->msix_enabled) |
780 | return; | 797 | return; |
781 | 798 | ||
782 | pci_msix_shutdown(dev); | 799 | pci_msix_shutdown(dev); |
783 | 800 | free_msi_irqs(dev); | |
784 | msix_free_all_irqs(dev); | ||
785 | } | 801 | } |
786 | EXPORT_SYMBOL(pci_disable_msix); | 802 | EXPORT_SYMBOL(pci_disable_msix); |
787 | 803 | ||
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix); | |||
794 | * allocated for this device function, are reclaimed to unused state, | 810 | * allocated for this device function, are reclaimed to unused state, |
795 | * which may be used later on. | 811 | * which may be used later on. |
796 | **/ | 812 | **/ |
797 | void msi_remove_pci_irq_vectors(struct pci_dev* dev) | 813 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) |
798 | { | 814 | { |
799 | if (!pci_msi_enable || !dev) | 815 | if (!pci_msi_enable || !dev) |
800 | return; | 816 | return; |
801 | |||
802 | if (dev->msi_enabled) | ||
803 | msi_free_irqs(dev); | ||
804 | 817 | ||
805 | if (dev->msix_enabled) | 818 | if (dev->msi_enabled || dev->msix_enabled) |
806 | msix_free_all_irqs(dev); | 819 | free_msi_irqs(dev); |
807 | } | 820 | } |
808 | 821 | ||
809 | void pci_no_msi(void) | 822 | void pci_no_msi(void) |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index ea15b0537457..33317df47699 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -109,15 +109,32 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev) | |||
109 | return handle ? acpi_bus_can_wakeup(handle) : false; | 109 | return handle ? acpi_bus_can_wakeup(handle) : false; |
110 | } | 110 | } |
111 | 111 | ||
112 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) | ||
113 | { | ||
114 | while (bus->parent) { | ||
115 | struct pci_dev *bridge = bus->self; | ||
116 | int ret; | ||
117 | |||
118 | ret = acpi_pm_device_sleep_wake(&bridge->dev, enable); | ||
119 | if (!ret || bridge->is_pcie) | ||
120 | return; | ||
121 | bus = bus->parent; | ||
122 | } | ||
123 | |||
124 | /* We have reached the root bus. */ | ||
125 | if (bus->bridge) | ||
126 | acpi_pm_device_sleep_wake(bus->bridge, enable); | ||
127 | } | ||
128 | |||
112 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | 129 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) |
113 | { | 130 | { |
114 | int error = acpi_pm_device_sleep_wake(&dev->dev, enable); | 131 | if (acpi_pci_can_wakeup(dev)) |
132 | return acpi_pm_device_sleep_wake(&dev->dev, enable); | ||
115 | 133 | ||
116 | if (!error) | 134 | if (!dev->is_pcie) |
117 | dev_printk(KERN_INFO, &dev->dev, | 135 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); |
118 | "wake-up capability %s by ACPI\n", | 136 | |
119 | enable ? "enabled" : "disabled"); | 137 | return 0; |
120 | return error; | ||
121 | } | 138 | } |
122 | 139 | ||
123 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 140 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a7eb7277b106..e5d47be3c6d7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -19,37 +19,98 @@ | |||
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include "pci.h" | 20 | #include "pci.h" |
21 | 21 | ||
22 | /* | ||
23 | * Dynamic device IDs are disabled for !CONFIG_HOTPLUG | ||
24 | */ | ||
25 | |||
26 | struct pci_dynid { | 22 | struct pci_dynid { |
27 | struct list_head node; | 23 | struct list_head node; |
28 | struct pci_device_id id; | 24 | struct pci_device_id id; |
29 | }; | 25 | }; |
30 | 26 | ||
31 | #ifdef CONFIG_HOTPLUG | 27 | /** |
28 | * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices | ||
29 | * @drv: target pci driver | ||
30 | * @vendor: PCI vendor ID | ||
31 | * @device: PCI device ID | ||
32 | * @subvendor: PCI subvendor ID | ||
33 | * @subdevice: PCI subdevice ID | ||
34 | * @class: PCI class | ||
35 | * @class_mask: PCI class mask | ||
36 | * @driver_data: private driver data | ||
37 | * | ||
38 | * Adds a new dynamic pci device ID to this driver and causes the | ||
39 | * driver to probe for all devices again. @drv must have been | ||
40 | * registered prior to calling this function. | ||
41 | * | ||
42 | * CONTEXT: | ||
43 | * Does GFP_KERNEL allocation. | ||
44 | * | ||
45 | * RETURNS: | ||
46 | * 0 on success, -errno on failure. | ||
47 | */ | ||
48 | int pci_add_dynid(struct pci_driver *drv, | ||
49 | unsigned int vendor, unsigned int device, | ||
50 | unsigned int subvendor, unsigned int subdevice, | ||
51 | unsigned int class, unsigned int class_mask, | ||
52 | unsigned long driver_data) | ||
53 | { | ||
54 | struct pci_dynid *dynid; | ||
55 | int retval; | ||
56 | |||
57 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | ||
58 | if (!dynid) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | dynid->id.vendor = vendor; | ||
62 | dynid->id.device = device; | ||
63 | dynid->id.subvendor = subvendor; | ||
64 | dynid->id.subdevice = subdevice; | ||
65 | dynid->id.class = class; | ||
66 | dynid->id.class_mask = class_mask; | ||
67 | dynid->id.driver_data = driver_data; | ||
68 | |||
69 | spin_lock(&drv->dynids.lock); | ||
70 | list_add_tail(&dynid->node, &drv->dynids.list); | ||
71 | spin_unlock(&drv->dynids.lock); | ||
72 | |||
73 | get_driver(&drv->driver); | ||
74 | retval = driver_attach(&drv->driver); | ||
75 | put_driver(&drv->driver); | ||
76 | |||
77 | return retval; | ||
78 | } | ||
79 | |||
80 | static void pci_free_dynids(struct pci_driver *drv) | ||
81 | { | ||
82 | struct pci_dynid *dynid, *n; | ||
32 | 83 | ||
84 | spin_lock(&drv->dynids.lock); | ||
85 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { | ||
86 | list_del(&dynid->node); | ||
87 | kfree(dynid); | ||
88 | } | ||
89 | spin_unlock(&drv->dynids.lock); | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Dynamic device ID manipulation via sysfs is disabled for !CONFIG_HOTPLUG | ||
94 | */ | ||
95 | #ifdef CONFIG_HOTPLUG | ||
33 | /** | 96 | /** |
34 | * store_new_id - add a new PCI device ID to this driver and re-probe devices | 97 | * store_new_id - sysfs frontend to pci_add_dynid() |
35 | * @driver: target device driver | 98 | * @driver: target device driver |
36 | * @buf: buffer for scanning device ID data | 99 | * @buf: buffer for scanning device ID data |
37 | * @count: input size | 100 | * @count: input size |
38 | * | 101 | * |
39 | * Adds a new dynamic pci device ID to this driver, | 102 | * Allow PCI IDs to be added to an existing driver via sysfs. |
40 | * and causes the driver to probe for all devices again. | ||
41 | */ | 103 | */ |
42 | static ssize_t | 104 | static ssize_t |
43 | store_new_id(struct device_driver *driver, const char *buf, size_t count) | 105 | store_new_id(struct device_driver *driver, const char *buf, size_t count) |
44 | { | 106 | { |
45 | struct pci_dynid *dynid; | ||
46 | struct pci_driver *pdrv = to_pci_driver(driver); | 107 | struct pci_driver *pdrv = to_pci_driver(driver); |
47 | const struct pci_device_id *ids = pdrv->id_table; | 108 | const struct pci_device_id *ids = pdrv->id_table; |
48 | __u32 vendor, device, subvendor=PCI_ANY_ID, | 109 | __u32 vendor, device, subvendor=PCI_ANY_ID, |
49 | subdevice=PCI_ANY_ID, class=0, class_mask=0; | 110 | subdevice=PCI_ANY_ID, class=0, class_mask=0; |
50 | unsigned long driver_data=0; | 111 | unsigned long driver_data=0; |
51 | int fields=0; | 112 | int fields=0; |
52 | int retval=0; | 113 | int retval; |
53 | 114 | ||
54 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", | 115 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", |
55 | &vendor, &device, &subvendor, &subdevice, | 116 | &vendor, &device, &subvendor, &subdevice, |
@@ -72,27 +133,8 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) | |||
72 | return retval; | 133 | return retval; |
73 | } | 134 | } |
74 | 135 | ||
75 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | 136 | retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice, |
76 | if (!dynid) | 137 | class, class_mask, driver_data); |
77 | return -ENOMEM; | ||
78 | |||
79 | dynid->id.vendor = vendor; | ||
80 | dynid->id.device = device; | ||
81 | dynid->id.subvendor = subvendor; | ||
82 | dynid->id.subdevice = subdevice; | ||
83 | dynid->id.class = class; | ||
84 | dynid->id.class_mask = class_mask; | ||
85 | dynid->id.driver_data = driver_data; | ||
86 | |||
87 | spin_lock(&pdrv->dynids.lock); | ||
88 | list_add_tail(&dynid->node, &pdrv->dynids.list); | ||
89 | spin_unlock(&pdrv->dynids.lock); | ||
90 | |||
91 | if (get_driver(&pdrv->driver)) { | ||
92 | retval = driver_attach(&pdrv->driver); | ||
93 | put_driver(&pdrv->driver); | ||
94 | } | ||
95 | |||
96 | if (retval) | 138 | if (retval) |
97 | return retval; | 139 | return retval; |
98 | return count; | 140 | return count; |
@@ -145,19 +187,6 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count) | |||
145 | } | 187 | } |
146 | static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id); | 188 | static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id); |
147 | 189 | ||
148 | static void | ||
149 | pci_free_dynids(struct pci_driver *drv) | ||
150 | { | ||
151 | struct pci_dynid *dynid, *n; | ||
152 | |||
153 | spin_lock(&drv->dynids.lock); | ||
154 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { | ||
155 | list_del(&dynid->node); | ||
156 | kfree(dynid); | ||
157 | } | ||
158 | spin_unlock(&drv->dynids.lock); | ||
159 | } | ||
160 | |||
161 | static int | 190 | static int |
162 | pci_create_newid_file(struct pci_driver *drv) | 191 | pci_create_newid_file(struct pci_driver *drv) |
163 | { | 192 | { |
@@ -186,7 +215,6 @@ static void pci_remove_removeid_file(struct pci_driver *drv) | |||
186 | driver_remove_file(&drv->driver, &driver_attr_remove_id); | 215 | driver_remove_file(&drv->driver, &driver_attr_remove_id); |
187 | } | 216 | } |
188 | #else /* !CONFIG_HOTPLUG */ | 217 | #else /* !CONFIG_HOTPLUG */ |
189 | static inline void pci_free_dynids(struct pci_driver *drv) {} | ||
190 | static inline int pci_create_newid_file(struct pci_driver *drv) | 218 | static inline int pci_create_newid_file(struct pci_driver *drv) |
191 | { | 219 | { |
192 | return 0; | 220 | return 0; |
@@ -417,8 +445,6 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) | |||
417 | struct pci_dev * pci_dev = to_pci_dev(dev); | 445 | struct pci_dev * pci_dev = to_pci_dev(dev); |
418 | struct pci_driver * drv = pci_dev->driver; | 446 | struct pci_driver * drv = pci_dev->driver; |
419 | 447 | ||
420 | pci_dev->state_saved = false; | ||
421 | |||
422 | if (drv && drv->suspend) { | 448 | if (drv && drv->suspend) { |
423 | pci_power_t prev = pci_dev->current_state; | 449 | pci_power_t prev = pci_dev->current_state; |
424 | int error; | 450 | int error; |
@@ -514,7 +540,6 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) | |||
514 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | 540 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) |
515 | { | 541 | { |
516 | pci_restore_standard_config(pci_dev); | 542 | pci_restore_standard_config(pci_dev); |
517 | pci_dev->state_saved = false; | ||
518 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 543 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
519 | } | 544 | } |
520 | 545 | ||
@@ -580,8 +605,6 @@ static int pci_pm_suspend(struct device *dev) | |||
580 | if (pci_has_legacy_pm_support(pci_dev)) | 605 | if (pci_has_legacy_pm_support(pci_dev)) |
581 | return pci_legacy_suspend(dev, PMSG_SUSPEND); | 606 | return pci_legacy_suspend(dev, PMSG_SUSPEND); |
582 | 607 | ||
583 | pci_dev->state_saved = false; | ||
584 | |||
585 | if (!pm) { | 608 | if (!pm) { |
586 | pci_pm_default_suspend(pci_dev); | 609 | pci_pm_default_suspend(pci_dev); |
587 | goto Fixup; | 610 | goto Fixup; |
@@ -694,7 +717,7 @@ static int pci_pm_resume(struct device *dev) | |||
694 | pci_pm_reenable_device(pci_dev); | 717 | pci_pm_reenable_device(pci_dev); |
695 | } | 718 | } |
696 | 719 | ||
697 | return 0; | 720 | return error; |
698 | } | 721 | } |
699 | 722 | ||
700 | #else /* !CONFIG_SUSPEND */ | 723 | #else /* !CONFIG_SUSPEND */ |
@@ -716,8 +739,6 @@ static int pci_pm_freeze(struct device *dev) | |||
716 | if (pci_has_legacy_pm_support(pci_dev)) | 739 | if (pci_has_legacy_pm_support(pci_dev)) |
717 | return pci_legacy_suspend(dev, PMSG_FREEZE); | 740 | return pci_legacy_suspend(dev, PMSG_FREEZE); |
718 | 741 | ||
719 | pci_dev->state_saved = false; | ||
720 | |||
721 | if (!pm) { | 742 | if (!pm) { |
722 | pci_pm_default_suspend(pci_dev); | 743 | pci_pm_default_suspend(pci_dev); |
723 | return 0; | 744 | return 0; |
@@ -793,6 +814,8 @@ static int pci_pm_thaw(struct device *dev) | |||
793 | pci_pm_reenable_device(pci_dev); | 814 | pci_pm_reenable_device(pci_dev); |
794 | } | 815 | } |
795 | 816 | ||
817 | pci_dev->state_saved = false; | ||
818 | |||
796 | return error; | 819 | return error; |
797 | } | 820 | } |
798 | 821 | ||
@@ -804,8 +827,6 @@ static int pci_pm_poweroff(struct device *dev) | |||
804 | if (pci_has_legacy_pm_support(pci_dev)) | 827 | if (pci_has_legacy_pm_support(pci_dev)) |
805 | return pci_legacy_suspend(dev, PMSG_HIBERNATE); | 828 | return pci_legacy_suspend(dev, PMSG_HIBERNATE); |
806 | 829 | ||
807 | pci_dev->state_saved = false; | ||
808 | |||
809 | if (!pm) { | 830 | if (!pm) { |
810 | pci_pm_default_suspend(pci_dev); | 831 | pci_pm_default_suspend(pci_dev); |
811 | goto Fixup; | 832 | goto Fixup; |
@@ -1106,6 +1127,7 @@ static int __init pci_driver_init(void) | |||
1106 | 1127 | ||
1107 | postcore_initcall(pci_driver_init); | 1128 | postcore_initcall(pci_driver_init); |
1108 | 1129 | ||
1130 | EXPORT_SYMBOL_GPL(pci_add_dynid); | ||
1109 | EXPORT_SYMBOL(pci_match_id); | 1131 | EXPORT_SYMBOL(pci_match_id); |
1110 | EXPORT_SYMBOL(__pci_register_driver); | 1132 | EXPORT_SYMBOL(__pci_register_driver); |
1111 | EXPORT_SYMBOL(pci_unregister_driver); | 1133 | EXPORT_SYMBOL(pci_unregister_driver); |
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 74fbec0bf6cb..f7b68ca6cc98 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c | |||
@@ -19,8 +19,16 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | 21 | ||
22 | static char ids[1024] __initdata; | ||
23 | |||
24 | module_param_string(ids, ids, sizeof(ids), 0); | ||
25 | MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is " | ||
26 | "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\"" | ||
27 | " and multiple comma separated entries can be specified"); | ||
28 | |||
22 | static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) | 29 | static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) |
23 | { | 30 | { |
31 | dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n"); | ||
24 | return 0; | 32 | return 0; |
25 | } | 33 | } |
26 | 34 | ||
@@ -32,7 +40,42 @@ static struct pci_driver stub_driver = { | |||
32 | 40 | ||
33 | static int __init pci_stub_init(void) | 41 | static int __init pci_stub_init(void) |
34 | { | 42 | { |
35 | return pci_register_driver(&stub_driver); | 43 | char *p, *id; |
44 | int rc; | ||
45 | |||
46 | rc = pci_register_driver(&stub_driver); | ||
47 | if (rc) | ||
48 | return rc; | ||
49 | |||
50 | /* add ids specified in the module parameter */ | ||
51 | p = ids; | ||
52 | while ((id = strsep(&p, ","))) { | ||
53 | unsigned int vendor, device, subvendor = PCI_ANY_ID, | ||
54 | subdevice = PCI_ANY_ID, class=0, class_mask=0; | ||
55 | int fields; | ||
56 | |||
57 | fields = sscanf(id, "%x:%x:%x:%x:%x:%x", | ||
58 | &vendor, &device, &subvendor, &subdevice, | ||
59 | &class, &class_mask); | ||
60 | |||
61 | if (fields < 2) { | ||
62 | printk(KERN_WARNING | ||
63 | "pci-stub: invalid id string \"%s\"\n", id); | ||
64 | continue; | ||
65 | } | ||
66 | |||
67 | printk(KERN_INFO | ||
68 | "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", | ||
69 | vendor, device, subvendor, subdevice, class, class_mask); | ||
70 | |||
71 | rc = pci_add_dynid(&stub_driver, vendor, device, | ||
72 | subvendor, subdevice, class, class_mask, 0); | ||
73 | if (rc) | ||
74 | printk(KERN_WARNING | ||
75 | "pci-stub: failed to add dynamic id (%d)\n", rc); | ||
76 | } | ||
77 | |||
78 | return 0; | ||
36 | } | 79 | } |
37 | 80 | ||
38 | static void __exit pci_stub_exit(void) | 81 | static void __exit pci_stub_exit(void) |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 85ebd02a64a7..0f6382f090ee 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -916,6 +916,24 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev) | |||
916 | return 0; | 916 | return 0; |
917 | } | 917 | } |
918 | 918 | ||
919 | static ssize_t reset_store(struct device *dev, | ||
920 | struct device_attribute *attr, const char *buf, | ||
921 | size_t count) | ||
922 | { | ||
923 | struct pci_dev *pdev = to_pci_dev(dev); | ||
924 | unsigned long val; | ||
925 | ssize_t result = strict_strtoul(buf, 0, &val); | ||
926 | |||
927 | if (result < 0) | ||
928 | return result; | ||
929 | |||
930 | if (val != 1) | ||
931 | return -EINVAL; | ||
932 | return pci_reset_function(pdev); | ||
933 | } | ||
934 | |||
935 | static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); | ||
936 | |||
919 | static int pci_create_capabilities_sysfs(struct pci_dev *dev) | 937 | static int pci_create_capabilities_sysfs(struct pci_dev *dev) |
920 | { | 938 | { |
921 | int retval; | 939 | int retval; |
@@ -943,7 +961,22 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) | |||
943 | /* Active State Power Management */ | 961 | /* Active State Power Management */ |
944 | pcie_aspm_create_sysfs_dev_files(dev); | 962 | pcie_aspm_create_sysfs_dev_files(dev); |
945 | 963 | ||
964 | if (!pci_probe_reset_function(dev)) { | ||
965 | retval = device_create_file(&dev->dev, &reset_attr); | ||
966 | if (retval) | ||
967 | goto error; | ||
968 | dev->reset_fn = 1; | ||
969 | } | ||
946 | return 0; | 970 | return 0; |
971 | |||
972 | error: | ||
973 | pcie_aspm_remove_sysfs_dev_files(dev); | ||
974 | if (dev->vpd && dev->vpd->attr) { | ||
975 | sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr); | ||
976 | kfree(dev->vpd->attr); | ||
977 | } | ||
978 | |||
979 | return retval; | ||
947 | } | 980 | } |
948 | 981 | ||
949 | int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) | 982 | int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) |
@@ -1037,6 +1070,10 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev) | |||
1037 | } | 1070 | } |
1038 | 1071 | ||
1039 | pcie_aspm_remove_sysfs_dev_files(dev); | 1072 | pcie_aspm_remove_sysfs_dev_files(dev); |
1073 | if (dev->reset_fn) { | ||
1074 | device_remove_file(&dev->dev, &reset_attr); | ||
1075 | dev->reset_fn = 0; | ||
1076 | } | ||
1040 | } | 1077 | } |
1041 | 1078 | ||
1042 | /** | 1079 | /** |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7b70312181d7..6edecff0b419 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -41,6 +41,12 @@ int pci_domains_supported = 1; | |||
41 | unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; | 41 | unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; |
42 | unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | 42 | unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; |
43 | 43 | ||
44 | #define DEFAULT_HOTPLUG_IO_SIZE (256) | ||
45 | #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024) | ||
46 | /* pci=hpmemsize=nnM,hpiosize=nn can override this */ | ||
47 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | ||
48 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | ||
49 | |||
44 | /** | 50 | /** |
45 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children | 51 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children |
46 | * @bus: pointer to PCI bus structure to search | 52 | * @bus: pointer to PCI bus structure to search |
@@ -848,6 +854,7 @@ pci_restore_state(struct pci_dev *dev) | |||
848 | 854 | ||
849 | if (!dev->state_saved) | 855 | if (!dev->state_saved) |
850 | return 0; | 856 | return 0; |
857 | |||
851 | /* PCI Express register must be restored first */ | 858 | /* PCI Express register must be restored first */ |
852 | pci_restore_pcie_state(dev); | 859 | pci_restore_pcie_state(dev); |
853 | 860 | ||
@@ -869,6 +876,8 @@ pci_restore_state(struct pci_dev *dev) | |||
869 | pci_restore_msi_state(dev); | 876 | pci_restore_msi_state(dev); |
870 | pci_restore_iov_state(dev); | 877 | pci_restore_iov_state(dev); |
871 | 878 | ||
879 | dev->state_saved = false; | ||
880 | |||
872 | return 0; | 881 | return 0; |
873 | } | 882 | } |
874 | 883 | ||
@@ -1214,30 +1223,40 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1214 | */ | 1223 | */ |
1215 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1224 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) |
1216 | { | 1225 | { |
1217 | int error = 0; | 1226 | int ret = 0; |
1218 | bool pme_done = false; | ||
1219 | 1227 | ||
1220 | if (enable && !device_may_wakeup(&dev->dev)) | 1228 | if (enable && !device_may_wakeup(&dev->dev)) |
1221 | return -EINVAL; | 1229 | return -EINVAL; |
1222 | 1230 | ||
1231 | /* Don't do the same thing twice in a row for one device. */ | ||
1232 | if (!!enable == !!dev->wakeup_prepared) | ||
1233 | return 0; | ||
1234 | |||
1223 | /* | 1235 | /* |
1224 | * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don | 1236 | * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don |
1225 | * Anderson we should be doing PME# wake enable followed by ACPI wake | 1237 | * Anderson we should be doing PME# wake enable followed by ACPI wake |
1226 | * enable. To disable wake-up we call the platform first, for symmetry. | 1238 | * enable. To disable wake-up we call the platform first, for symmetry. |
1227 | */ | 1239 | */ |
1228 | 1240 | ||
1229 | if (!enable && platform_pci_can_wakeup(dev)) | 1241 | if (enable) { |
1230 | error = platform_pci_sleep_wake(dev, false); | 1242 | int error; |
1231 | |||
1232 | if (!enable || pci_pme_capable(dev, state)) { | ||
1233 | pci_pme_active(dev, enable); | ||
1234 | pme_done = true; | ||
1235 | } | ||
1236 | 1243 | ||
1237 | if (enable && platform_pci_can_wakeup(dev)) | 1244 | if (pci_pme_capable(dev, state)) |
1245 | pci_pme_active(dev, true); | ||
1246 | else | ||
1247 | ret = 1; | ||
1238 | error = platform_pci_sleep_wake(dev, true); | 1248 | error = platform_pci_sleep_wake(dev, true); |
1249 | if (ret) | ||
1250 | ret = error; | ||
1251 | if (!ret) | ||
1252 | dev->wakeup_prepared = true; | ||
1253 | } else { | ||
1254 | platform_pci_sleep_wake(dev, false); | ||
1255 | pci_pme_active(dev, false); | ||
1256 | dev->wakeup_prepared = false; | ||
1257 | } | ||
1239 | 1258 | ||
1240 | return pme_done ? 0 : error; | 1259 | return ret; |
1241 | } | 1260 | } |
1242 | 1261 | ||
1243 | /** | 1262 | /** |
@@ -1356,6 +1375,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
1356 | int pm; | 1375 | int pm; |
1357 | u16 pmc; | 1376 | u16 pmc; |
1358 | 1377 | ||
1378 | dev->wakeup_prepared = false; | ||
1359 | dev->pm_cap = 0; | 1379 | dev->pm_cap = 0; |
1360 | 1380 | ||
1361 | /* find PCI PM capability in list */ | 1381 | /* find PCI PM capability in list */ |
@@ -2262,6 +2282,22 @@ int __pci_reset_function(struct pci_dev *dev) | |||
2262 | EXPORT_SYMBOL_GPL(__pci_reset_function); | 2282 | EXPORT_SYMBOL_GPL(__pci_reset_function); |
2263 | 2283 | ||
2264 | /** | 2284 | /** |
2285 | * pci_probe_reset_function - check whether the device can be safely reset | ||
2286 | * @dev: PCI device to reset | ||
2287 | * | ||
2288 | * Some devices allow an individual function to be reset without affecting | ||
2289 | * other functions in the same device. The PCI device must be responsive | ||
2290 | * to PCI config space in order to use this function. | ||
2291 | * | ||
2292 | * Returns 0 if the device function can be reset or negative if the | ||
2293 | * device doesn't support resetting a single function. | ||
2294 | */ | ||
2295 | int pci_probe_reset_function(struct pci_dev *dev) | ||
2296 | { | ||
2297 | return pci_dev_reset(dev, 1); | ||
2298 | } | ||
2299 | |||
2300 | /** | ||
2265 | * pci_reset_function - quiesce and reset a PCI device function | 2301 | * pci_reset_function - quiesce and reset a PCI device function |
2266 | * @dev: PCI device to reset | 2302 | * @dev: PCI device to reset |
2267 | * | 2303 | * |
@@ -2504,6 +2540,50 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | |||
2504 | return 0; | 2540 | return 0; |
2505 | } | 2541 | } |
2506 | 2542 | ||
2543 | /** | ||
2544 | * pci_set_vga_state - set VGA decode state on device and parents if requested | ||
2545 | * @dev the PCI device | ||
2546 | * @decode - true = enable decoding, false = disable decoding | ||
2547 | * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY | ||
2548 | * @change_bridge - traverse ancestors and change bridges | ||
2549 | */ | ||
2550 | int pci_set_vga_state(struct pci_dev *dev, bool decode, | ||
2551 | unsigned int command_bits, bool change_bridge) | ||
2552 | { | ||
2553 | struct pci_bus *bus; | ||
2554 | struct pci_dev *bridge; | ||
2555 | u16 cmd; | ||
2556 | |||
2557 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); | ||
2558 | |||
2559 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
2560 | if (decode == true) | ||
2561 | cmd |= command_bits; | ||
2562 | else | ||
2563 | cmd &= ~command_bits; | ||
2564 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
2565 | |||
2566 | if (change_bridge == false) | ||
2567 | return 0; | ||
2568 | |||
2569 | bus = dev->bus; | ||
2570 | while (bus) { | ||
2571 | bridge = bus->self; | ||
2572 | if (bridge) { | ||
2573 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, | ||
2574 | &cmd); | ||
2575 | if (decode == true) | ||
2576 | cmd |= PCI_BRIDGE_CTL_VGA; | ||
2577 | else | ||
2578 | cmd &= ~PCI_BRIDGE_CTL_VGA; | ||
2579 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, | ||
2580 | cmd); | ||
2581 | } | ||
2582 | bus = bus->parent; | ||
2583 | } | ||
2584 | return 0; | ||
2585 | } | ||
2586 | |||
2507 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE | 2587 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE |
2508 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; | 2588 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; |
2509 | spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; | 2589 | spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; |
@@ -2672,6 +2752,10 @@ static int __init pci_setup(char *str) | |||
2672 | strlen(str + 19)); | 2752 | strlen(str + 19)); |
2673 | } else if (!strncmp(str, "ecrc=", 5)) { | 2753 | } else if (!strncmp(str, "ecrc=", 5)) { |
2674 | pcie_ecrc_get_policy(str + 5); | 2754 | pcie_ecrc_get_policy(str + 5); |
2755 | } else if (!strncmp(str, "hpiosize=", 9)) { | ||
2756 | pci_hotplug_io_size = memparse(str + 9, &str); | ||
2757 | } else if (!strncmp(str, "hpmemsize=", 10)) { | ||
2758 | pci_hotplug_mem_size = memparse(str + 10, &str); | ||
2675 | } else { | 2759 | } else { |
2676 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 2760 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
2677 | str); | 2761 | str); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5ff4d25bf0e9..d92d1954a2fb 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -16,6 +16,7 @@ extern void pci_cleanup_rom(struct pci_dev *dev); | |||
16 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | 16 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, |
17 | struct vm_area_struct *vma); | 17 | struct vm_area_struct *vma); |
18 | #endif | 18 | #endif |
19 | int pci_probe_reset_function(struct pci_dev *dev); | ||
19 | 20 | ||
20 | /** | 21 | /** |
21 | * struct pci_platform_pm_ops - Firmware PM callbacks | 22 | * struct pci_platform_pm_ops - Firmware PM callbacks |
@@ -133,7 +134,6 @@ static inline int pci_no_d1d2(struct pci_dev *dev) | |||
133 | return (dev->no_d1d2 || parent_dstates); | 134 | return (dev->no_d1d2 || parent_dstates); |
134 | 135 | ||
135 | } | 136 | } |
136 | extern int pcie_mch_quirk; | ||
137 | extern struct device_attribute pci_dev_attrs[]; | 137 | extern struct device_attribute pci_dev_attrs[]; |
138 | extern struct device_attribute dev_attr_cpuaffinity; | 138 | extern struct device_attribute dev_attr_cpuaffinity; |
139 | extern struct device_attribute dev_attr_cpulistaffinity; | 139 | extern struct device_attribute dev_attr_cpulistaffinity; |
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index d92ae21a59d8..62d15f652bb6 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
@@ -22,11 +22,10 @@ | |||
22 | #include <linux/miscdevice.h> | 22 | #include <linux/miscdevice.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <asm/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include "aerdrv.h" | 26 | #include "aerdrv.h" |
27 | 27 | ||
28 | struct aer_error_inj | 28 | struct aer_error_inj { |
29 | { | ||
30 | u8 bus; | 29 | u8 bus; |
31 | u8 dev; | 30 | u8 dev; |
32 | u8 fn; | 31 | u8 fn; |
@@ -38,8 +37,7 @@ struct aer_error_inj | |||
38 | u32 header_log3; | 37 | u32 header_log3; |
39 | }; | 38 | }; |
40 | 39 | ||
41 | struct aer_error | 40 | struct aer_error { |
42 | { | ||
43 | struct list_head list; | 41 | struct list_head list; |
44 | unsigned int bus; | 42 | unsigned int bus; |
45 | unsigned int devfn; | 43 | unsigned int devfn; |
@@ -55,8 +53,7 @@ struct aer_error | |||
55 | u32 source_id; | 53 | u32 source_id; |
56 | }; | 54 | }; |
57 | 55 | ||
58 | struct pci_bus_ops | 56 | struct pci_bus_ops { |
59 | { | ||
60 | struct list_head list; | 57 | struct list_head list; |
61 | struct pci_bus *bus; | 58 | struct pci_bus *bus; |
62 | struct pci_ops *ops; | 59 | struct pci_ops *ops; |
@@ -150,7 +147,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where, | |||
150 | target = &err->header_log1; | 147 | target = &err->header_log1; |
151 | break; | 148 | break; |
152 | case PCI_ERR_HEADER_LOG+8: | 149 | case PCI_ERR_HEADER_LOG+8: |
153 | target = &err->header_log2; | 150 | target = &err->header_log2; |
154 | break; | 151 | break; |
155 | case PCI_ERR_HEADER_LOG+12: | 152 | case PCI_ERR_HEADER_LOG+12: |
156 | target = &err->header_log3; | 153 | target = &err->header_log3; |
@@ -258,8 +255,7 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus) | |||
258 | bus_ops = NULL; | 255 | bus_ops = NULL; |
259 | out: | 256 | out: |
260 | spin_unlock_irqrestore(&inject_lock, flags); | 257 | spin_unlock_irqrestore(&inject_lock, flags); |
261 | if (bus_ops) | 258 | kfree(bus_ops); |
262 | kfree(bus_ops); | ||
263 | return 0; | 259 | return 0; |
264 | } | 260 | } |
265 | 261 | ||
@@ -401,10 +397,8 @@ static int aer_inject(struct aer_error_inj *einj) | |||
401 | else | 397 | else |
402 | ret = -EINVAL; | 398 | ret = -EINVAL; |
403 | out_put: | 399 | out_put: |
404 | if (err_alloc) | 400 | kfree(err_alloc); |
405 | kfree(err_alloc); | 401 | kfree(rperr_alloc); |
406 | if (rperr_alloc) | ||
407 | kfree(rperr_alloc); | ||
408 | pci_dev_put(dev); | 402 | pci_dev_put(dev); |
409 | return ret; | 403 | return ret; |
410 | } | 404 | } |
@@ -458,8 +452,7 @@ static void __exit aer_inject_exit(void) | |||
458 | } | 452 | } |
459 | 453 | ||
460 | spin_lock_irqsave(&inject_lock, flags); | 454 | spin_lock_irqsave(&inject_lock, flags); |
461 | list_for_each_entry_safe(err, err_next, | 455 | list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) { |
462 | &pci_bus_ops_list, list) { | ||
463 | list_del(&err->list); | 456 | list_del(&err->list); |
464 | kfree(err); | 457 | kfree(err); |
465 | } | 458 | } |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 4770f13b3ca1..10c0e62bd5a8 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -38,7 +38,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
38 | MODULE_DESCRIPTION(DRIVER_DESC); | 38 | MODULE_DESCRIPTION(DRIVER_DESC); |
39 | MODULE_LICENSE("GPL"); | 39 | MODULE_LICENSE("GPL"); |
40 | 40 | ||
41 | static int __devinit aer_probe (struct pcie_device *dev); | 41 | static int __devinit aer_probe(struct pcie_device *dev); |
42 | static void aer_remove(struct pcie_device *dev); | 42 | static void aer_remove(struct pcie_device *dev); |
43 | static pci_ers_result_t aer_error_detected(struct pci_dev *dev, | 43 | static pci_ers_result_t aer_error_detected(struct pci_dev *dev, |
44 | enum pci_channel_state error); | 44 | enum pci_channel_state error); |
@@ -47,7 +47,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev); | |||
47 | 47 | ||
48 | static struct pci_error_handlers aer_error_handlers = { | 48 | static struct pci_error_handlers aer_error_handlers = { |
49 | .error_detected = aer_error_detected, | 49 | .error_detected = aer_error_detected, |
50 | .resume = aer_error_resume, | 50 | .resume = aer_error_resume, |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static struct pcie_port_service_driver aerdriver = { | 53 | static struct pcie_port_service_driver aerdriver = { |
@@ -134,12 +134,12 @@ EXPORT_SYMBOL_GPL(aer_irq); | |||
134 | * | 134 | * |
135 | * Invoked when Root Port's AER service is loaded. | 135 | * Invoked when Root Port's AER service is loaded. |
136 | **/ | 136 | **/ |
137 | static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev) | 137 | static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) |
138 | { | 138 | { |
139 | struct aer_rpc *rpc; | 139 | struct aer_rpc *rpc; |
140 | 140 | ||
141 | if (!(rpc = kzalloc(sizeof(struct aer_rpc), | 141 | rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL); |
142 | GFP_KERNEL))) | 142 | if (!rpc) |
143 | return NULL; | 143 | return NULL; |
144 | 144 | ||
145 | /* | 145 | /* |
@@ -189,26 +189,28 @@ static void aer_remove(struct pcie_device *dev) | |||
189 | * | 189 | * |
190 | * Invoked when PCI Express bus loads AER service driver. | 190 | * Invoked when PCI Express bus loads AER service driver. |
191 | **/ | 191 | **/ |
192 | static int __devinit aer_probe (struct pcie_device *dev) | 192 | static int __devinit aer_probe(struct pcie_device *dev) |
193 | { | 193 | { |
194 | int status; | 194 | int status; |
195 | struct aer_rpc *rpc; | 195 | struct aer_rpc *rpc; |
196 | struct device *device = &dev->device; | 196 | struct device *device = &dev->device; |
197 | 197 | ||
198 | /* Init */ | 198 | /* Init */ |
199 | if ((status = aer_init(dev))) | 199 | status = aer_init(dev); |
200 | if (status) | ||
200 | return status; | 201 | return status; |
201 | 202 | ||
202 | /* Alloc rpc data structure */ | 203 | /* Alloc rpc data structure */ |
203 | if (!(rpc = aer_alloc_rpc(dev))) { | 204 | rpc = aer_alloc_rpc(dev); |
205 | if (!rpc) { | ||
204 | dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); | 206 | dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); |
205 | aer_remove(dev); | 207 | aer_remove(dev); |
206 | return -ENOMEM; | 208 | return -ENOMEM; |
207 | } | 209 | } |
208 | 210 | ||
209 | /* Request IRQ ISR */ | 211 | /* Request IRQ ISR */ |
210 | if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", | 212 | status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev); |
211 | dev))) { | 213 | if (status) { |
212 | dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); | 214 | dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); |
213 | aer_remove(dev); | 215 | aer_remove(dev); |
214 | return status; | 216 | return status; |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index bbd7428ca2d0..bd833ea3ba49 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
@@ -16,12 +16,9 @@ | |||
16 | #define AER_NONFATAL 0 | 16 | #define AER_NONFATAL 0 |
17 | #define AER_FATAL 1 | 17 | #define AER_FATAL 1 |
18 | #define AER_CORRECTABLE 2 | 18 | #define AER_CORRECTABLE 2 |
19 | #define AER_UNCORRECTABLE 4 | ||
20 | #define AER_ERROR_MASK 0x001fffff | ||
21 | #define AER_ERROR(d) (d & AER_ERROR_MASK) | ||
22 | 19 | ||
23 | /* Root Error Status Register Bits */ | 20 | /* Root Error Status Register Bits */ |
24 | #define ROOT_ERR_STATUS_MASKS 0x0f | 21 | #define ROOT_ERR_STATUS_MASKS 0x0f |
25 | 22 | ||
26 | #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ | 23 | #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ |
27 | PCI_EXP_RTCTL_SENFEE| \ | 24 | PCI_EXP_RTCTL_SENFEE| \ |
@@ -32,8 +29,6 @@ | |||
32 | #define ERR_COR_ID(d) (d & 0xffff) | 29 | #define ERR_COR_ID(d) (d & 0xffff) |
33 | #define ERR_UNCOR_ID(d) (d >> 16) | 30 | #define ERR_UNCOR_ID(d) (d >> 16) |
34 | 31 | ||
35 | #define AER_SUCCESS 0 | ||
36 | #define AER_UNSUCCESS 1 | ||
37 | #define AER_ERROR_SOURCES_MAX 100 | 32 | #define AER_ERROR_SOURCES_MAX 100 |
38 | 33 | ||
39 | #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ | 34 | #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ |
@@ -43,13 +38,6 @@ | |||
43 | PCI_ERR_UNC_UNX_COMP| \ | 38 | PCI_ERR_UNC_UNX_COMP| \ |
44 | PCI_ERR_UNC_MALF_TLP) | 39 | PCI_ERR_UNC_MALF_TLP) |
45 | 40 | ||
46 | /* AER Error Info Flags */ | ||
47 | #define AER_TLP_HEADER_VALID_FLAG 0x00000001 | ||
48 | #define AER_MULTI_ERROR_VALID_FLAG 0x00000002 | ||
49 | |||
50 | #define ERR_CORRECTABLE_ERROR_MASK 0x000031c1 | ||
51 | #define ERR_UNCORRECTABLE_ERROR_MASK 0x001ff010 | ||
52 | |||
53 | struct header_log_regs { | 41 | struct header_log_regs { |
54 | unsigned int dw0; | 42 | unsigned int dw0; |
55 | unsigned int dw1; | 43 | unsigned int dw1; |
@@ -61,11 +49,20 @@ struct header_log_regs { | |||
61 | struct aer_err_info { | 49 | struct aer_err_info { |
62 | struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; | 50 | struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; |
63 | int error_dev_num; | 51 | int error_dev_num; |
64 | u16 id; | 52 | |
65 | int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ | 53 | unsigned int id:16; |
66 | int flags; | 54 | |
55 | unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */ | ||
56 | unsigned int __pad1:5; | ||
57 | unsigned int multi_error_valid:1; | ||
58 | |||
59 | unsigned int first_error:5; | ||
60 | unsigned int __pad2:2; | ||
61 | unsigned int tlp_header_valid:1; | ||
62 | |||
67 | unsigned int status; /* COR/UNCOR Error Status */ | 63 | unsigned int status; /* COR/UNCOR Error Status */ |
68 | struct header_log_regs tlp; /* TLP Header */ | 64 | unsigned int mask; /* COR/UNCOR Error Mask */ |
65 | struct header_log_regs tlp; /* TLP Header */ | ||
69 | }; | 66 | }; |
70 | 67 | ||
71 | struct aer_err_source { | 68 | struct aer_err_source { |
@@ -125,6 +122,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc); | |||
125 | extern int aer_init(struct pcie_device *dev); | 122 | extern int aer_init(struct pcie_device *dev); |
126 | extern void aer_isr(struct work_struct *work); | 123 | extern void aer_isr(struct work_struct *work); |
127 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); | 124 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); |
125 | extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info); | ||
128 | extern irqreturn_t aer_irq(int irq, void *context); | 126 | extern irqreturn_t aer_irq(int irq, void *context); |
129 | 127 | ||
130 | #ifdef CONFIG_ACPI | 128 | #ifdef CONFIG_ACPI |
@@ -136,4 +134,4 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) | |||
136 | } | 134 | } |
137 | #endif | 135 | #endif |
138 | 136 | ||
139 | #endif //_AERDRV_H_ | 137 | #endif /* _AERDRV_H_ */ |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 3d8872704a58..9f5ccbeb4fa5 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -49,10 +49,11 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) | |||
49 | PCI_EXP_DEVCTL_NFERE | | 49 | PCI_EXP_DEVCTL_NFERE | |
50 | PCI_EXP_DEVCTL_FERE | | 50 | PCI_EXP_DEVCTL_FERE | |
51 | PCI_EXP_DEVCTL_URRE; | 51 | PCI_EXP_DEVCTL_URRE; |
52 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, | 52 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16); |
53 | reg16); | 53 | |
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); | ||
56 | 57 | ||
57 | int pci_disable_pcie_error_reporting(struct pci_dev *dev) | 58 | int pci_disable_pcie_error_reporting(struct pci_dev *dev) |
58 | { | 59 | { |
@@ -68,10 +69,11 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) | |||
68 | PCI_EXP_DEVCTL_NFERE | | 69 | PCI_EXP_DEVCTL_NFERE | |
69 | PCI_EXP_DEVCTL_FERE | | 70 | PCI_EXP_DEVCTL_FERE | |
70 | PCI_EXP_DEVCTL_URRE); | 71 | PCI_EXP_DEVCTL_URRE); |
71 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, | 72 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16); |
72 | reg16); | 73 | |
73 | return 0; | 74 | return 0; |
74 | } | 75 | } |
76 | EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); | ||
75 | 77 | ||
76 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | 78 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) |
77 | { | 79 | { |
@@ -92,6 +94,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | |||
92 | 94 | ||
93 | return 0; | 95 | return 0; |
94 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | ||
95 | 98 | ||
96 | #if 0 | 99 | #if 0 |
97 | int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | 100 | int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) |
@@ -110,7 +113,6 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | |||
110 | } | 113 | } |
111 | #endif /* 0 */ | 114 | #endif /* 0 */ |
112 | 115 | ||
113 | |||
114 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | 116 | static int set_device_error_reporting(struct pci_dev *dev, void *data) |
115 | { | 117 | { |
116 | bool enable = *((bool *)data); | 118 | bool enable = *((bool *)data); |
@@ -164,8 +166,9 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) | |||
164 | e_info->dev[e_info->error_dev_num] = dev; | 166 | e_info->dev[e_info->error_dev_num] = dev; |
165 | e_info->error_dev_num++; | 167 | e_info->error_dev_num++; |
166 | return 1; | 168 | return 1; |
167 | } else | 169 | } |
168 | return 0; | 170 | |
171 | return 0; | ||
169 | } | 172 | } |
170 | 173 | ||
171 | 174 | ||
@@ -193,7 +196,7 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
193 | * If there is no multiple error, we stop | 196 | * If there is no multiple error, we stop |
194 | * or continue based on the id comparing. | 197 | * or continue based on the id comparing. |
195 | */ | 198 | */ |
196 | if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG)) | 199 | if (!e_info->multi_error_valid) |
197 | return result; | 200 | return result; |
198 | 201 | ||
199 | /* | 202 | /* |
@@ -233,24 +236,16 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
233 | status = 0; | 236 | status = 0; |
234 | mask = 0; | 237 | mask = 0; |
235 | if (e_info->severity == AER_CORRECTABLE) { | 238 | if (e_info->severity == AER_CORRECTABLE) { |
236 | pci_read_config_dword(dev, | 239 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); |
237 | pos + PCI_ERR_COR_STATUS, | 240 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask); |
238 | &status); | 241 | if (status & ~mask) { |
239 | pci_read_config_dword(dev, | ||
240 | pos + PCI_ERR_COR_MASK, | ||
241 | &mask); | ||
242 | if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) { | ||
243 | add_error_device(e_info, dev); | 242 | add_error_device(e_info, dev); |
244 | goto added; | 243 | goto added; |
245 | } | 244 | } |
246 | } else { | 245 | } else { |
247 | pci_read_config_dword(dev, | 246 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); |
248 | pos + PCI_ERR_UNCOR_STATUS, | 247 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); |
249 | &status); | 248 | if (status & ~mask) { |
250 | pci_read_config_dword(dev, | ||
251 | pos + PCI_ERR_UNCOR_MASK, | ||
252 | &mask); | ||
253 | if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) { | ||
254 | add_error_device(e_info, dev); | 249 | add_error_device(e_info, dev); |
255 | goto added; | 250 | goto added; |
256 | } | 251 | } |
@@ -259,7 +254,7 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
259 | return 0; | 254 | return 0; |
260 | 255 | ||
261 | added: | 256 | added: |
262 | if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG) | 257 | if (e_info->multi_error_valid) |
263 | return 0; | 258 | return 0; |
264 | else | 259 | else |
265 | return 1; | 260 | return 1; |
@@ -411,8 +406,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, | |||
411 | pci_cleanup_aer_uncorrect_error_status(dev); | 406 | pci_cleanup_aer_uncorrect_error_status(dev); |
412 | dev->error_state = pci_channel_io_normal; | 407 | dev->error_state = pci_channel_io_normal; |
413 | } | 408 | } |
414 | } | 409 | } else { |
415 | else { | ||
416 | /* | 410 | /* |
417 | * If the error is reported by an end point, we think this | 411 | * If the error is reported by an end point, we think this |
418 | * error is related to the upstream link of the end point. | 412 | * error is related to the upstream link of the end point. |
@@ -473,7 +467,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev, | |||
473 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) | 467 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) |
474 | udev = dev; | 468 | udev = dev; |
475 | else | 469 | else |
476 | udev= dev->bus->self; | 470 | udev = dev->bus->self; |
477 | 471 | ||
478 | data.is_downstream = 0; | 472 | data.is_downstream = 0; |
479 | data.aer_driver = NULL; | 473 | data.aer_driver = NULL; |
@@ -576,7 +570,7 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev, | |||
576 | * | 570 | * |
577 | * Invoked when an error being detected by Root Port. | 571 | * Invoked when an error being detected by Root Port. |
578 | */ | 572 | */ |
579 | static void handle_error_source(struct pcie_device * aerdev, | 573 | static void handle_error_source(struct pcie_device *aerdev, |
580 | struct pci_dev *dev, | 574 | struct pci_dev *dev, |
581 | struct aer_err_info *info) | 575 | struct aer_err_info *info) |
582 | { | 576 | { |
@@ -682,7 +676,7 @@ static void disable_root_aer(struct aer_rpc *rpc) | |||
682 | * | 676 | * |
683 | * Invoked by DPC handler to consume an error. | 677 | * Invoked by DPC handler to consume an error. |
684 | */ | 678 | */ |
685 | static struct aer_err_source* get_e_source(struct aer_rpc *rpc) | 679 | static struct aer_err_source *get_e_source(struct aer_rpc *rpc) |
686 | { | 680 | { |
687 | struct aer_err_source *e_source; | 681 | struct aer_err_source *e_source; |
688 | unsigned long flags; | 682 | unsigned long flags; |
@@ -702,32 +696,50 @@ static struct aer_err_source* get_e_source(struct aer_rpc *rpc) | |||
702 | return e_source; | 696 | return e_source; |
703 | } | 697 | } |
704 | 698 | ||
699 | /** | ||
700 | * get_device_error_info - read error status from dev and store it to info | ||
701 | * @dev: pointer to the device expected to have a error record | ||
702 | * @info: pointer to structure to store the error record | ||
703 | * | ||
704 | * Return 1 on success, 0 on error. | ||
705 | */ | ||
705 | static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | 706 | static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) |
706 | { | 707 | { |
707 | int pos; | 708 | int pos, temp; |
709 | |||
710 | info->status = 0; | ||
711 | info->tlp_header_valid = 0; | ||
708 | 712 | ||
709 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 713 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
710 | 714 | ||
711 | /* The device might not support AER */ | 715 | /* The device might not support AER */ |
712 | if (!pos) | 716 | if (!pos) |
713 | return AER_SUCCESS; | 717 | return 1; |
714 | 718 | ||
715 | if (info->severity == AER_CORRECTABLE) { | 719 | if (info->severity == AER_CORRECTABLE) { |
716 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, | 720 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, |
717 | &info->status); | 721 | &info->status); |
718 | if (!(info->status & ERR_CORRECTABLE_ERROR_MASK)) | 722 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, |
719 | return AER_UNSUCCESS; | 723 | &info->mask); |
724 | if (!(info->status & ~info->mask)) | ||
725 | return 0; | ||
720 | } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE || | 726 | } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE || |
721 | info->severity == AER_NONFATAL) { | 727 | info->severity == AER_NONFATAL) { |
722 | 728 | ||
723 | /* Link is still healthy for IO reads */ | 729 | /* Link is still healthy for IO reads */ |
724 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, | 730 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, |
725 | &info->status); | 731 | &info->status); |
726 | if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK)) | 732 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, |
727 | return AER_UNSUCCESS; | 733 | &info->mask); |
734 | if (!(info->status & ~info->mask)) | ||
735 | return 0; | ||
736 | |||
737 | /* Get First Error Pointer */ | ||
738 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp); | ||
739 | info->first_error = PCI_ERR_CAP_FEP(temp); | ||
728 | 740 | ||
729 | if (info->status & AER_LOG_TLP_MASKS) { | 741 | if (info->status & AER_LOG_TLP_MASKS) { |
730 | info->flags |= AER_TLP_HEADER_VALID_FLAG; | 742 | info->tlp_header_valid = 1; |
731 | pci_read_config_dword(dev, | 743 | pci_read_config_dword(dev, |
732 | pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); | 744 | pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); |
733 | pci_read_config_dword(dev, | 745 | pci_read_config_dword(dev, |
@@ -739,7 +751,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | |||
739 | } | 751 | } |
740 | } | 752 | } |
741 | 753 | ||
742 | return AER_SUCCESS; | 754 | return 1; |
743 | } | 755 | } |
744 | 756 | ||
745 | static inline void aer_process_err_devices(struct pcie_device *p_device, | 757 | static inline void aer_process_err_devices(struct pcie_device *p_device, |
@@ -753,14 +765,14 @@ static inline void aer_process_err_devices(struct pcie_device *p_device, | |||
753 | e_info->id); | 765 | e_info->id); |
754 | } | 766 | } |
755 | 767 | ||
768 | /* Report all before handle them, not to lost records by reset etc. */ | ||
756 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { | 769 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { |
757 | if (get_device_error_info(e_info->dev[i], e_info) == | 770 | if (get_device_error_info(e_info->dev[i], e_info)) |
758 | AER_SUCCESS) { | ||
759 | aer_print_error(e_info->dev[i], e_info); | 771 | aer_print_error(e_info->dev[i], e_info); |
760 | handle_error_source(p_device, | 772 | } |
761 | e_info->dev[i], | 773 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { |
762 | e_info); | 774 | if (get_device_error_info(e_info->dev[i], e_info)) |
763 | } | 775 | handle_error_source(p_device, e_info->dev[i], e_info); |
764 | } | 776 | } |
765 | } | 777 | } |
766 | 778 | ||
@@ -806,7 +818,9 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
806 | if (e_src->status & | 818 | if (e_src->status & |
807 | (PCI_ERR_ROOT_MULTI_COR_RCV | | 819 | (PCI_ERR_ROOT_MULTI_COR_RCV | |
808 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) | 820 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) |
809 | e_info->flags |= AER_MULTI_ERROR_VALID_FLAG; | 821 | e_info->multi_error_valid = 1; |
822 | |||
823 | aer_print_port_info(p_device->port, e_info); | ||
810 | 824 | ||
811 | find_source_device(p_device->port, e_info); | 825 | find_source_device(p_device->port, e_info); |
812 | aer_process_err_devices(p_device, e_info); | 826 | aer_process_err_devices(p_device, e_info); |
@@ -863,10 +877,5 @@ int aer_init(struct pcie_device *dev) | |||
863 | if (aer_osc_setup(dev) && !forceload) | 877 | if (aer_osc_setup(dev) && !forceload) |
864 | return -ENXIO; | 878 | return -ENXIO; |
865 | 879 | ||
866 | return AER_SUCCESS; | 880 | return 0; |
867 | } | 881 | } |
868 | |||
869 | EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); | ||
870 | EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); | ||
871 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | ||
872 | |||
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 0fc29ae80df8..44acde72294f 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
@@ -27,69 +27,70 @@ | |||
27 | #define AER_AGENT_COMPLETER 2 | 27 | #define AER_AGENT_COMPLETER 2 |
28 | #define AER_AGENT_TRANSMITTER 3 | 28 | #define AER_AGENT_TRANSMITTER 3 |
29 | 29 | ||
30 | #define AER_AGENT_REQUESTER_MASK (PCI_ERR_UNC_COMP_TIME| \ | 30 | #define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \ |
31 | PCI_ERR_UNC_UNSUP) | 31 | 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP)) |
32 | 32 | #define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \ | |
33 | #define AER_AGENT_COMPLETER_MASK PCI_ERR_UNC_COMP_ABORT | 33 | 0 : PCI_ERR_UNC_COMP_ABORT) |
34 | 34 | #define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \ | |
35 | #define AER_AGENT_TRANSMITTER_MASK(t, e) (e & (PCI_ERR_COR_REP_ROLL| \ | 35 | (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0) |
36 | ((t == AER_CORRECTABLE) ? PCI_ERR_COR_REP_TIMER: 0))) | ||
37 | 36 | ||
38 | #define AER_GET_AGENT(t, e) \ | 37 | #define AER_GET_AGENT(t, e) \ |
39 | ((e & AER_AGENT_COMPLETER_MASK) ? AER_AGENT_COMPLETER : \ | 38 | ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \ |
40 | (e & AER_AGENT_REQUESTER_MASK) ? AER_AGENT_REQUESTER : \ | 39 | (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \ |
41 | (AER_AGENT_TRANSMITTER_MASK(t, e)) ? AER_AGENT_TRANSMITTER : \ | 40 | (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \ |
42 | AER_AGENT_RECEIVER) | 41 | AER_AGENT_RECEIVER) |
43 | 42 | ||
44 | #define AER_PHYSICAL_LAYER_ERROR_MASK PCI_ERR_COR_RCVR | ||
45 | #define AER_DATA_LINK_LAYER_ERROR_MASK(t, e) \ | ||
46 | (PCI_ERR_UNC_DLP| \ | ||
47 | PCI_ERR_COR_BAD_TLP| \ | ||
48 | PCI_ERR_COR_BAD_DLLP| \ | ||
49 | PCI_ERR_COR_REP_ROLL| \ | ||
50 | ((t == AER_CORRECTABLE) ? \ | ||
51 | PCI_ERR_COR_REP_TIMER: 0)) | ||
52 | |||
53 | #define AER_PHYSICAL_LAYER_ERROR 0 | 43 | #define AER_PHYSICAL_LAYER_ERROR 0 |
54 | #define AER_DATA_LINK_LAYER_ERROR 1 | 44 | #define AER_DATA_LINK_LAYER_ERROR 1 |
55 | #define AER_TRANSACTION_LAYER_ERROR 2 | 45 | #define AER_TRANSACTION_LAYER_ERROR 2 |
56 | 46 | ||
57 | #define AER_GET_LAYER_ERROR(t, e) \ | 47 | #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ |
58 | ((e & AER_PHYSICAL_LAYER_ERROR_MASK) ? \ | 48 | PCI_ERR_COR_RCVR : 0) |
59 | AER_PHYSICAL_LAYER_ERROR : \ | 49 | #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ |
60 | (e & AER_DATA_LINK_LAYER_ERROR_MASK(t, e)) ? \ | 50 | (PCI_ERR_COR_BAD_TLP| \ |
61 | AER_DATA_LINK_LAYER_ERROR : \ | 51 | PCI_ERR_COR_BAD_DLLP| \ |
62 | AER_TRANSACTION_LAYER_ERROR) | 52 | PCI_ERR_COR_REP_ROLL| \ |
53 | PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP) | ||
54 | |||
55 | #define AER_GET_LAYER_ERROR(t, e) \ | ||
56 | ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \ | ||
57 | (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \ | ||
58 | AER_TRANSACTION_LAYER_ERROR) | ||
59 | |||
60 | #define AER_PR(info, pdev, fmt, args...) \ | ||
61 | printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \ | ||
62 | KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \ | ||
63 | dev_name(&pdev->dev), ## args) | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * AER error strings | 66 | * AER error strings |
66 | */ | 67 | */ |
67 | static char* aer_error_severity_string[] = { | 68 | static char *aer_error_severity_string[] = { |
68 | "Uncorrected (Non-Fatal)", | 69 | "Uncorrected (Non-Fatal)", |
69 | "Uncorrected (Fatal)", | 70 | "Uncorrected (Fatal)", |
70 | "Corrected" | 71 | "Corrected" |
71 | }; | 72 | }; |
72 | 73 | ||
73 | static char* aer_error_layer[] = { | 74 | static char *aer_error_layer[] = { |
74 | "Physical Layer", | 75 | "Physical Layer", |
75 | "Data Link Layer", | 76 | "Data Link Layer", |
76 | "Transaction Layer" | 77 | "Transaction Layer" |
77 | }; | 78 | }; |
78 | static char* aer_correctable_error_string[] = { | 79 | static char *aer_correctable_error_string[] = { |
79 | "Receiver Error ", /* Bit Position 0 */ | 80 | "Receiver Error ", /* Bit Position 0 */ |
80 | NULL, | 81 | NULL, |
81 | NULL, | 82 | NULL, |
82 | NULL, | 83 | NULL, |
83 | NULL, | 84 | NULL, |
84 | NULL, | 85 | NULL, |
85 | "Bad TLP ", /* Bit Position 6 */ | 86 | "Bad TLP ", /* Bit Position 6 */ |
86 | "Bad DLLP ", /* Bit Position 7 */ | 87 | "Bad DLLP ", /* Bit Position 7 */ |
87 | "RELAY_NUM Rollover ", /* Bit Position 8 */ | 88 | "RELAY_NUM Rollover ", /* Bit Position 8 */ |
88 | NULL, | 89 | NULL, |
89 | NULL, | 90 | NULL, |
90 | NULL, | 91 | NULL, |
91 | "Replay Timer Timeout ", /* Bit Position 12 */ | 92 | "Replay Timer Timeout ", /* Bit Position 12 */ |
92 | "Advisory Non-Fatal ", /* Bit Position 13 */ | 93 | "Advisory Non-Fatal ", /* Bit Position 13 */ |
93 | NULL, | 94 | NULL, |
94 | NULL, | 95 | NULL, |
95 | NULL, | 96 | NULL, |
@@ -110,7 +111,7 @@ static char* aer_correctable_error_string[] = { | |||
110 | NULL, | 111 | NULL, |
111 | }; | 112 | }; |
112 | 113 | ||
113 | static char* aer_uncorrectable_error_string[] = { | 114 | static char *aer_uncorrectable_error_string[] = { |
114 | NULL, | 115 | NULL, |
115 | NULL, | 116 | NULL, |
116 | NULL, | 117 | NULL, |
@@ -123,10 +124,10 @@ static char* aer_uncorrectable_error_string[] = { | |||
123 | NULL, | 124 | NULL, |
124 | NULL, | 125 | NULL, |
125 | NULL, | 126 | NULL, |
126 | "Poisoned TLP ", /* Bit Position 12 */ | 127 | "Poisoned TLP ", /* Bit Position 12 */ |
127 | "Flow Control Protocol ", /* Bit Position 13 */ | 128 | "Flow Control Protocol ", /* Bit Position 13 */ |
128 | "Completion Timeout ", /* Bit Position 14 */ | 129 | "Completion Timeout ", /* Bit Position 14 */ |
129 | "Completer Abort ", /* Bit Position 15 */ | 130 | "Completer Abort ", /* Bit Position 15 */ |
130 | "Unexpected Completion ", /* Bit Position 16 */ | 131 | "Unexpected Completion ", /* Bit Position 16 */ |
131 | "Receiver Overflow ", /* Bit Position 17 */ | 132 | "Receiver Overflow ", /* Bit Position 17 */ |
132 | "Malformed TLP ", /* Bit Position 18 */ | 133 | "Malformed TLP ", /* Bit Position 18 */ |
@@ -145,98 +146,69 @@ static char* aer_uncorrectable_error_string[] = { | |||
145 | NULL, | 146 | NULL, |
146 | }; | 147 | }; |
147 | 148 | ||
148 | static char* aer_agent_string[] = { | 149 | static char *aer_agent_string[] = { |
149 | "Receiver ID", | 150 | "Receiver ID", |
150 | "Requester ID", | 151 | "Requester ID", |
151 | "Completer ID", | 152 | "Completer ID", |
152 | "Transmitter ID" | 153 | "Transmitter ID" |
153 | }; | 154 | }; |
154 | 155 | ||
155 | static char * aer_get_error_source_name(int severity, | 156 | static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev) |
156 | unsigned int status, | ||
157 | char errmsg_buff[]) | ||
158 | { | 157 | { |
159 | int i; | 158 | int i, status; |
160 | char * errmsg = NULL; | 159 | char *errmsg = NULL; |
160 | |||
161 | status = (info->status & ~info->mask); | ||
161 | 162 | ||
162 | for (i = 0; i < 32; i++) { | 163 | for (i = 0; i < 32; i++) { |
163 | if (!(status & (1 << i))) | 164 | if (!(status & (1 << i))) |
164 | continue; | 165 | continue; |
165 | 166 | ||
166 | if (severity == AER_CORRECTABLE) | 167 | if (info->severity == AER_CORRECTABLE) |
167 | errmsg = aer_correctable_error_string[i]; | 168 | errmsg = aer_correctable_error_string[i]; |
168 | else | 169 | else |
169 | errmsg = aer_uncorrectable_error_string[i]; | 170 | errmsg = aer_uncorrectable_error_string[i]; |
170 | 171 | ||
171 | if (!errmsg) { | 172 | if (errmsg) |
172 | sprintf(errmsg_buff, "Unknown Error Bit %2d ", i); | 173 | AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg, |
173 | errmsg = errmsg_buff; | 174 | info->first_error == i ? " (First)" : ""); |
174 | } | 175 | else |
175 | 176 | AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i, | |
176 | break; | 177 | info->first_error == i ? " (First)" : ""); |
177 | } | 178 | } |
178 | |||
179 | return errmsg; | ||
180 | } | 179 | } |
181 | 180 | ||
182 | static DEFINE_SPINLOCK(logbuf_lock); | ||
183 | static char errmsg_buff[100]; | ||
184 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | 181 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) |
185 | { | 182 | { |
186 | char * errmsg; | 183 | int id = ((dev->bus->number << 8) | dev->devfn); |
187 | int err_layer, agent; | 184 | |
188 | char * loglevel; | 185 | if (info->status == 0) { |
189 | 186 | AER_PR(info, dev, | |
190 | if (info->severity == AER_CORRECTABLE) | 187 | "PCIE Bus Error: severity=%s, type=Unaccessible, " |
191 | loglevel = KERN_WARNING; | 188 | "id=%04x(Unregistered Agent ID)\n", |
192 | else | 189 | aer_error_severity_string[info->severity], id); |
193 | loglevel = KERN_ERR; | ||
194 | |||
195 | printk("%s+------ PCI-Express Device Error ------+\n", loglevel); | ||
196 | printk("%sError Severity\t\t: %s\n", loglevel, | ||
197 | aer_error_severity_string[info->severity]); | ||
198 | |||
199 | if ( info->status == 0) { | ||
200 | printk("%sPCIE Bus Error type\t: (Unaccessible)\n", loglevel); | ||
201 | printk("%sUnaccessible Received\t: %s\n", loglevel, | ||
202 | info->flags & AER_MULTI_ERROR_VALID_FLAG ? | ||
203 | "Multiple" : "First"); | ||
204 | printk("%sUnregistered Agent ID\t: %04x\n", loglevel, | ||
205 | (dev->bus->number << 8) | dev->devfn); | ||
206 | } else { | 190 | } else { |
207 | err_layer = AER_GET_LAYER_ERROR(info->severity, info->status); | 191 | int layer, agent; |
208 | printk("%sPCIE Bus Error type\t: %s\n", loglevel, | ||
209 | aer_error_layer[err_layer]); | ||
210 | |||
211 | spin_lock(&logbuf_lock); | ||
212 | errmsg = aer_get_error_source_name(info->severity, | ||
213 | info->status, | ||
214 | errmsg_buff); | ||
215 | printk("%s%s\t: %s\n", loglevel, errmsg, | ||
216 | info->flags & AER_MULTI_ERROR_VALID_FLAG ? | ||
217 | "Multiple" : "First"); | ||
218 | spin_unlock(&logbuf_lock); | ||
219 | 192 | ||
193 | layer = AER_GET_LAYER_ERROR(info->severity, info->status); | ||
220 | agent = AER_GET_AGENT(info->severity, info->status); | 194 | agent = AER_GET_AGENT(info->severity, info->status); |
221 | printk("%s%s\t\t: %04x\n", loglevel, | 195 | |
222 | aer_agent_string[agent], | 196 | AER_PR(info, dev, |
223 | (dev->bus->number << 8) | dev->devfn); | 197 | "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n", |
224 | 198 | aer_error_severity_string[info->severity], | |
225 | printk("%sVendorID=%04xh, DeviceID=%04xh," | 199 | aer_error_layer[layer], id, aer_agent_string[agent]); |
226 | " Bus=%02xh, Device=%02xh, Function=%02xh\n", | 200 | |
227 | loglevel, | 201 | AER_PR(info, dev, |
228 | dev->vendor, | 202 | " device [%04x:%04x] error status/mask=%08x/%08x\n", |
229 | dev->device, | 203 | dev->vendor, dev->device, info->status, info->mask); |
230 | dev->bus->number, | 204 | |
231 | PCI_SLOT(dev->devfn), | 205 | __aer_print_error(info, dev); |
232 | PCI_FUNC(dev->devfn)); | 206 | |
233 | 207 | if (info->tlp_header_valid) { | |
234 | if (info->flags & AER_TLP_HEADER_VALID_FLAG) { | ||
235 | unsigned char *tlp = (unsigned char *) &info->tlp; | 208 | unsigned char *tlp = (unsigned char *) &info->tlp; |
236 | printk("%sTLP Header:\n", loglevel); | 209 | AER_PR(info, dev, " TLP Header:" |
237 | printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" | 210 | " %02x%02x%02x%02x %02x%02x%02x%02x" |
238 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | 211 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", |
239 | loglevel, | ||
240 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | 212 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, |
241 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | 213 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), |
242 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | 214 | *(tlp + 11), *(tlp + 10), *(tlp + 9), |
@@ -244,5 +216,15 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
244 | *(tlp + 13), *(tlp + 12)); | 216 | *(tlp + 13), *(tlp + 12)); |
245 | } | 217 | } |
246 | } | 218 | } |
219 | |||
220 | if (info->id && info->error_dev_num > 1 && info->id == id) | ||
221 | AER_PR(info, dev, | ||
222 | " Error of this Agent(%04x) is reported first\n", id); | ||
247 | } | 223 | } |
248 | 224 | ||
225 | void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) | ||
226 | { | ||
227 | dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n", | ||
228 | info->multi_error_valid ? "Multiple " : "", | ||
229 | aer_error_severity_string[info->severity], info->id); | ||
230 | } | ||
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 3d27c97e0486..f289ca9bf18d 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -26,6 +26,13 @@ | |||
26 | #endif | 26 | #endif |
27 | #define MODULE_PARAM_PREFIX "pcie_aspm." | 27 | #define MODULE_PARAM_PREFIX "pcie_aspm." |
28 | 28 | ||
29 | /* Note: those are not register definitions */ | ||
30 | #define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ | ||
31 | #define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ | ||
32 | #define ASPM_STATE_L1 (4) /* L1 state */ | ||
33 | #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) | ||
34 | #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) | ||
35 | |||
29 | struct aspm_latency { | 36 | struct aspm_latency { |
30 | u32 l0s; /* L0s latency (nsec) */ | 37 | u32 l0s; /* L0s latency (nsec) */ |
31 | u32 l1; /* L1 latency (nsec) */ | 38 | u32 l1; /* L1 latency (nsec) */ |
@@ -40,17 +47,20 @@ struct pcie_link_state { | |||
40 | struct list_head link; /* node in parent's children list */ | 47 | struct list_head link; /* node in parent's children list */ |
41 | 48 | ||
42 | /* ASPM state */ | 49 | /* ASPM state */ |
43 | u32 aspm_support:2; /* Supported ASPM state */ | 50 | u32 aspm_support:3; /* Supported ASPM state */ |
44 | u32 aspm_enabled:2; /* Enabled ASPM state */ | 51 | u32 aspm_enabled:3; /* Enabled ASPM state */ |
45 | u32 aspm_default:2; /* Default ASPM state by BIOS */ | 52 | u32 aspm_capable:3; /* Capable ASPM state with latency */ |
53 | u32 aspm_default:3; /* Default ASPM state by BIOS */ | ||
54 | u32 aspm_disable:3; /* Disabled ASPM state */ | ||
46 | 55 | ||
47 | /* Clock PM state */ | 56 | /* Clock PM state */ |
48 | u32 clkpm_capable:1; /* Clock PM capable? */ | 57 | u32 clkpm_capable:1; /* Clock PM capable? */ |
49 | u32 clkpm_enabled:1; /* Current Clock PM state */ | 58 | u32 clkpm_enabled:1; /* Current Clock PM state */ |
50 | u32 clkpm_default:1; /* Default Clock PM state by BIOS */ | 59 | u32 clkpm_default:1; /* Default Clock PM state by BIOS */ |
51 | 60 | ||
52 | /* Latencies */ | 61 | /* Exit latencies */ |
53 | struct aspm_latency latency; /* Exit latency */ | 62 | struct aspm_latency latency_up; /* Upstream direction exit latency */ |
63 | struct aspm_latency latency_dw; /* Downstream direction exit latency */ | ||
54 | /* | 64 | /* |
55 | * Endpoint acceptable latencies. A pcie downstream port only | 65 | * Endpoint acceptable latencies. A pcie downstream port only |
56 | * has one slot under it, so at most there are 8 functions. | 66 | * has one slot under it, so at most there are 8 functions. |
@@ -82,7 +92,7 @@ static int policy_to_aspm_state(struct pcie_link_state *link) | |||
82 | return 0; | 92 | return 0; |
83 | case POLICY_POWERSAVE: | 93 | case POLICY_POWERSAVE: |
84 | /* Enable ASPM L0s/L1 */ | 94 | /* Enable ASPM L0s/L1 */ |
85 | return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; | 95 | return ASPM_STATE_ALL; |
86 | case POLICY_DEFAULT: | 96 | case POLICY_DEFAULT: |
87 | return link->aspm_default; | 97 | return link->aspm_default; |
88 | } | 98 | } |
@@ -164,18 +174,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) | |||
164 | link->clkpm_capable = (blacklist) ? 0 : capable; | 174 | link->clkpm_capable = (blacklist) ? 0 : capable; |
165 | } | 175 | } |
166 | 176 | ||
167 | static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link) | ||
168 | { | ||
169 | struct pci_dev *child; | ||
170 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
171 | |||
172 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
173 | if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM) | ||
174 | return true; | ||
175 | } | ||
176 | return false; | ||
177 | } | ||
178 | |||
179 | /* | 177 | /* |
180 | * pcie_aspm_configure_common_clock: check if the 2 ends of a link | 178 | * pcie_aspm_configure_common_clock: check if the 2 ends of a link |
181 | * could use common clock. If they are, configure them to use the | 179 | * could use common clock. If they are, configure them to use the |
@@ -288,71 +286,133 @@ static u32 calc_l1_acceptable(u32 encoding) | |||
288 | return (1000 << encoding); | 286 | return (1000 << encoding); |
289 | } | 287 | } |
290 | 288 | ||
291 | static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, | 289 | struct aspm_register_info { |
292 | u32 *l0s, u32 *l1, u32 *enabled) | 290 | u32 support:2; |
291 | u32 enabled:2; | ||
292 | u32 latency_encoding_l0s; | ||
293 | u32 latency_encoding_l1; | ||
294 | }; | ||
295 | |||
296 | static void pcie_get_aspm_reg(struct pci_dev *pdev, | ||
297 | struct aspm_register_info *info) | ||
293 | { | 298 | { |
294 | int pos; | 299 | int pos; |
295 | u16 reg16; | 300 | u16 reg16; |
296 | u32 reg32, encoding; | 301 | u32 reg32; |
297 | 302 | ||
298 | *l0s = *l1 = *enabled = 0; | ||
299 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 303 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
300 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); | 304 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); |
301 | *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; | 305 | info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; |
302 | if (*state != PCIE_LINK_STATE_L0S && | 306 | /* 00b and 10b are defined as "Reserved". */ |
303 | *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S)) | 307 | if (info->support == PCIE_LINK_STATE_L1) |
304 | *state = 0; | 308 | info->support = 0; |
305 | if (*state == 0) | 309 | info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; |
310 | info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; | ||
311 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | ||
312 | info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; | ||
313 | } | ||
314 | |||
315 | static void pcie_aspm_check_latency(struct pci_dev *endpoint) | ||
316 | { | ||
317 | u32 latency, l1_switch_latency = 0; | ||
318 | struct aspm_latency *acceptable; | ||
319 | struct pcie_link_state *link; | ||
320 | |||
321 | /* Device not in D0 doesn't need latency check */ | ||
322 | if ((endpoint->current_state != PCI_D0) && | ||
323 | (endpoint->current_state != PCI_UNKNOWN)) | ||
306 | return; | 324 | return; |
307 | 325 | ||
308 | encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; | 326 | link = endpoint->bus->self->link_state; |
309 | *l0s = calc_l0s_latency(encoding); | 327 | acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)]; |
310 | if (*state & PCIE_LINK_STATE_L1) { | 328 | |
311 | encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; | 329 | while (link) { |
312 | *l1 = calc_l1_latency(encoding); | 330 | /* Check upstream direction L0s latency */ |
331 | if ((link->aspm_capable & ASPM_STATE_L0S_UP) && | ||
332 | (link->latency_up.l0s > acceptable->l0s)) | ||
333 | link->aspm_capable &= ~ASPM_STATE_L0S_UP; | ||
334 | |||
335 | /* Check downstream direction L0s latency */ | ||
336 | if ((link->aspm_capable & ASPM_STATE_L0S_DW) && | ||
337 | (link->latency_dw.l0s > acceptable->l0s)) | ||
338 | link->aspm_capable &= ~ASPM_STATE_L0S_DW; | ||
339 | /* | ||
340 | * Check L1 latency. | ||
341 | * Every switch on the path to root complex need 1 | ||
342 | * more microsecond for L1. Spec doesn't mention L0s. | ||
343 | */ | ||
344 | latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1); | ||
345 | if ((link->aspm_capable & ASPM_STATE_L1) && | ||
346 | (latency + l1_switch_latency > acceptable->l1)) | ||
347 | link->aspm_capable &= ~ASPM_STATE_L1; | ||
348 | l1_switch_latency += 1000; | ||
349 | |||
350 | link = link->parent; | ||
313 | } | 351 | } |
314 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | ||
315 | *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); | ||
316 | } | 352 | } |
317 | 353 | ||
318 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | 354 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) |
319 | { | 355 | { |
320 | u32 support, l0s, l1, enabled; | ||
321 | struct pci_dev *child, *parent = link->pdev; | 356 | struct pci_dev *child, *parent = link->pdev; |
322 | struct pci_bus *linkbus = parent->subordinate; | 357 | struct pci_bus *linkbus = parent->subordinate; |
358 | struct aspm_register_info upreg, dwreg; | ||
323 | 359 | ||
324 | if (blacklist) { | 360 | if (blacklist) { |
325 | /* Set support state to 0, so we will disable ASPM later */ | 361 | /* Set enabled/disable so that we will disable ASPM later */ |
326 | link->aspm_support = 0; | 362 | link->aspm_enabled = ASPM_STATE_ALL; |
327 | link->aspm_default = 0; | 363 | link->aspm_disable = ASPM_STATE_ALL; |
328 | link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; | ||
329 | return; | 364 | return; |
330 | } | 365 | } |
331 | 366 | ||
332 | /* Configure common clock before checking latencies */ | 367 | /* Configure common clock before checking latencies */ |
333 | pcie_aspm_configure_common_clock(link); | 368 | pcie_aspm_configure_common_clock(link); |
334 | 369 | ||
335 | /* upstream component states */ | 370 | /* Get upstream/downstream components' register state */ |
336 | pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled); | 371 | pcie_get_aspm_reg(parent, &upreg); |
337 | link->aspm_support = support; | ||
338 | link->latency.l0s = l0s; | ||
339 | link->latency.l1 = l1; | ||
340 | link->aspm_enabled = enabled; | ||
341 | |||
342 | /* downstream component states, all functions have the same setting */ | ||
343 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); | 372 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); |
344 | pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled); | 373 | pcie_get_aspm_reg(child, &dwreg); |
345 | link->aspm_support &= support; | ||
346 | link->latency.l0s = max_t(u32, link->latency.l0s, l0s); | ||
347 | link->latency.l1 = max_t(u32, link->latency.l1, l1); | ||
348 | 374 | ||
349 | if (!link->aspm_support) | 375 | /* |
350 | return; | 376 | * Setup L0s state |
351 | 377 | * | |
352 | link->aspm_enabled &= link->aspm_support; | 378 | * Note that we must not enable L0s in either direction on a |
379 | * given link unless components on both sides of the link each | ||
380 | * support L0s. | ||
381 | */ | ||
382 | if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S) | ||
383 | link->aspm_support |= ASPM_STATE_L0S; | ||
384 | if (dwreg.enabled & PCIE_LINK_STATE_L0S) | ||
385 | link->aspm_enabled |= ASPM_STATE_L0S_UP; | ||
386 | if (upreg.enabled & PCIE_LINK_STATE_L0S) | ||
387 | link->aspm_enabled |= ASPM_STATE_L0S_DW; | ||
388 | link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s); | ||
389 | link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s); | ||
390 | |||
391 | /* Setup L1 state */ | ||
392 | if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1) | ||
393 | link->aspm_support |= ASPM_STATE_L1; | ||
394 | if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1) | ||
395 | link->aspm_enabled |= ASPM_STATE_L1; | ||
396 | link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1); | ||
397 | link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1); | ||
398 | |||
399 | /* Save default state */ | ||
353 | link->aspm_default = link->aspm_enabled; | 400 | link->aspm_default = link->aspm_enabled; |
354 | 401 | ||
355 | /* ENDPOINT states*/ | 402 | /* Setup initial capable state. Will be updated later */ |
403 | link->aspm_capable = link->aspm_support; | ||
404 | /* | ||
405 | * If the downstream component has pci bridge function, don't | ||
406 | * do ASPM for now. | ||
407 | */ | ||
408 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
409 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | ||
410 | link->aspm_disable = ASPM_STATE_ALL; | ||
411 | break; | ||
412 | } | ||
413 | } | ||
414 | |||
415 | /* Get and check endpoint acceptable latencies */ | ||
356 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 416 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
357 | int pos; | 417 | int pos; |
358 | u32 reg32, encoding; | 418 | u32 reg32, encoding; |
@@ -365,109 +425,46 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | |||
365 | 425 | ||
366 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); | 426 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); |
367 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); | 427 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); |
428 | /* Calculate endpoint L0s acceptable latency */ | ||
368 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; | 429 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; |
369 | acceptable->l0s = calc_l0s_acceptable(encoding); | 430 | acceptable->l0s = calc_l0s_acceptable(encoding); |
370 | if (link->aspm_support & PCIE_LINK_STATE_L1) { | 431 | /* Calculate endpoint L1 acceptable latency */ |
371 | encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; | 432 | encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; |
372 | acceptable->l1 = calc_l1_acceptable(encoding); | 433 | acceptable->l1 = calc_l1_acceptable(encoding); |
373 | } | ||
374 | } | ||
375 | } | ||
376 | |||
377 | /** | ||
378 | * __pcie_aspm_check_state_one - check latency for endpoint device. | ||
379 | * @endpoint: pointer to the struct pci_dev of endpoint device | ||
380 | * | ||
381 | * TBD: The latency from the endpoint to root complex vary per switch's | ||
382 | * upstream link state above the device. Here we just do a simple check | ||
383 | * which assumes all links above the device can be in L1 state, that | ||
384 | * is we just consider the worst case. If switch's upstream link can't | ||
385 | * be put into L0S/L1, then our check is too strictly. | ||
386 | */ | ||
387 | static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state) | ||
388 | { | ||
389 | u32 l1_switch_latency = 0; | ||
390 | struct aspm_latency *acceptable; | ||
391 | struct pcie_link_state *link; | ||
392 | |||
393 | link = endpoint->bus->self->link_state; | ||
394 | state &= link->aspm_support; | ||
395 | acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)]; | ||
396 | 434 | ||
397 | while (link && state) { | 435 | pcie_aspm_check_latency(child); |
398 | if ((state & PCIE_LINK_STATE_L0S) && | ||
399 | (link->latency.l0s > acceptable->l0s)) | ||
400 | state &= ~PCIE_LINK_STATE_L0S; | ||
401 | if ((state & PCIE_LINK_STATE_L1) && | ||
402 | (link->latency.l1 + l1_switch_latency > acceptable->l1)) | ||
403 | state &= ~PCIE_LINK_STATE_L1; | ||
404 | link = link->parent; | ||
405 | /* | ||
406 | * Every switch on the path to root complex need 1 | ||
407 | * more microsecond for L1. Spec doesn't mention L0s. | ||
408 | */ | ||
409 | l1_switch_latency += 1000; | ||
410 | } | ||
411 | return state; | ||
412 | } | ||
413 | |||
414 | static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state) | ||
415 | { | ||
416 | pci_power_t power_state; | ||
417 | struct pci_dev *child; | ||
418 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
419 | |||
420 | /* If no child, ignore the link */ | ||
421 | if (list_empty(&linkbus->devices)) | ||
422 | return state; | ||
423 | |||
424 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
425 | /* | ||
426 | * If downstream component of a link is pci bridge, we | ||
427 | * disable ASPM for now for the link | ||
428 | */ | ||
429 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | ||
430 | return 0; | ||
431 | |||
432 | if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT && | ||
433 | child->pcie_type != PCI_EXP_TYPE_LEG_END)) | ||
434 | continue; | ||
435 | /* Device not in D0 doesn't need check latency */ | ||
436 | power_state = child->current_state; | ||
437 | if (power_state == PCI_D1 || power_state == PCI_D2 || | ||
438 | power_state == PCI_D3hot || power_state == PCI_D3cold) | ||
439 | continue; | ||
440 | state = __pcie_aspm_check_state_one(child, state); | ||
441 | } | 436 | } |
442 | return state; | ||
443 | } | 437 | } |
444 | 438 | ||
445 | static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state) | 439 | static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) |
446 | { | 440 | { |
447 | u16 reg16; | 441 | u16 reg16; |
448 | int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 442 | int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
449 | 443 | ||
450 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | 444 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
451 | reg16 &= ~0x3; | 445 | reg16 &= ~0x3; |
452 | reg16 |= state; | 446 | reg16 |= val; |
453 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 447 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
454 | } | 448 | } |
455 | 449 | ||
456 | static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state) | 450 | static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) |
457 | { | 451 | { |
452 | u32 upstream = 0, dwstream = 0; | ||
458 | struct pci_dev *child, *parent = link->pdev; | 453 | struct pci_dev *child, *parent = link->pdev; |
459 | struct pci_bus *linkbus = parent->subordinate; | 454 | struct pci_bus *linkbus = parent->subordinate; |
460 | 455 | ||
461 | /* If no child, disable the link */ | 456 | /* Nothing to do if the link is already in the requested state */ |
462 | if (list_empty(&linkbus->devices)) | 457 | state &= (link->aspm_capable & ~link->aspm_disable); |
463 | state = 0; | 458 | if (link->aspm_enabled == state) |
464 | /* | 459 | return; |
465 | * If the downstream component has pci bridge function, don't | 460 | /* Convert ASPM state to upstream/downstream ASPM register state */ |
466 | * do ASPM now. | 461 | if (state & ASPM_STATE_L0S_UP) |
467 | */ | 462 | dwstream |= PCIE_LINK_STATE_L0S; |
468 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 463 | if (state & ASPM_STATE_L0S_DW) |
469 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | 464 | upstream |= PCIE_LINK_STATE_L0S; |
470 | return; | 465 | if (state & ASPM_STATE_L1) { |
466 | upstream |= PCIE_LINK_STATE_L1; | ||
467 | dwstream |= PCIE_LINK_STATE_L1; | ||
471 | } | 468 | } |
472 | /* | 469 | /* |
473 | * Spec 2.0 suggests all functions should be configured the | 470 | * Spec 2.0 suggests all functions should be configured the |
@@ -475,67 +472,24 @@ static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state) | |||
475 | * upstream component first and then downstream, and vice | 472 | * upstream component first and then downstream, and vice |
476 | * versa for disabling ASPM L1. Spec doesn't mention L0S. | 473 | * versa for disabling ASPM L1. Spec doesn't mention L0S. |
477 | */ | 474 | */ |
478 | if (state & PCIE_LINK_STATE_L1) | 475 | if (state & ASPM_STATE_L1) |
479 | __pcie_aspm_config_one_dev(parent, state); | 476 | pcie_config_aspm_dev(parent, upstream); |
480 | |||
481 | list_for_each_entry(child, &linkbus->devices, bus_list) | 477 | list_for_each_entry(child, &linkbus->devices, bus_list) |
482 | __pcie_aspm_config_one_dev(child, state); | 478 | pcie_config_aspm_dev(child, dwstream); |
483 | 479 | if (!(state & ASPM_STATE_L1)) | |
484 | if (!(state & PCIE_LINK_STATE_L1)) | 480 | pcie_config_aspm_dev(parent, upstream); |
485 | __pcie_aspm_config_one_dev(parent, state); | ||
486 | 481 | ||
487 | link->aspm_enabled = state; | 482 | link->aspm_enabled = state; |
488 | } | 483 | } |
489 | 484 | ||
490 | /* Check the whole hierarchy, and configure each link in the hierarchy */ | 485 | static void pcie_config_aspm_path(struct pcie_link_state *link) |
491 | static void __pcie_aspm_configure_link_state(struct pcie_link_state *link, | ||
492 | u32 state) | ||
493 | { | 486 | { |
494 | struct pcie_link_state *leaf, *root = link->root; | 487 | while (link) { |
495 | 488 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); | |
496 | state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); | 489 | link = link->parent; |
497 | |||
498 | /* Check all links who have specific root port link */ | ||
499 | list_for_each_entry(leaf, &link_list, sibling) { | ||
500 | if (!list_empty(&leaf->children) || (leaf->root != root)) | ||
501 | continue; | ||
502 | state = pcie_aspm_check_state(leaf, state); | ||
503 | } | ||
504 | /* Check root port link too in case it hasn't children */ | ||
505 | state = pcie_aspm_check_state(root, state); | ||
506 | if (link->aspm_enabled == state) | ||
507 | return; | ||
508 | /* | ||
509 | * We must change the hierarchy. See comments in | ||
510 | * __pcie_aspm_config_link for the order | ||
511 | **/ | ||
512 | if (state & PCIE_LINK_STATE_L1) { | ||
513 | list_for_each_entry(leaf, &link_list, sibling) { | ||
514 | if (leaf->root == root) | ||
515 | __pcie_aspm_config_link(leaf, state); | ||
516 | } | ||
517 | } else { | ||
518 | list_for_each_entry_reverse(leaf, &link_list, sibling) { | ||
519 | if (leaf->root == root) | ||
520 | __pcie_aspm_config_link(leaf, state); | ||
521 | } | ||
522 | } | 490 | } |
523 | } | 491 | } |
524 | 492 | ||
525 | /* | ||
526 | * pcie_aspm_configure_link_state: enable/disable PCI express link state | ||
527 | * @pdev: the root port or switch downstream port | ||
528 | */ | ||
529 | static void pcie_aspm_configure_link_state(struct pcie_link_state *link, | ||
530 | u32 state) | ||
531 | { | ||
532 | down_read(&pci_bus_sem); | ||
533 | mutex_lock(&aspm_lock); | ||
534 | __pcie_aspm_configure_link_state(link, state); | ||
535 | mutex_unlock(&aspm_lock); | ||
536 | up_read(&pci_bus_sem); | ||
537 | } | ||
538 | |||
539 | static void free_link_state(struct pcie_link_state *link) | 493 | static void free_link_state(struct pcie_link_state *link) |
540 | { | 494 | { |
541 | link->pdev->link_state = NULL; | 495 | link->pdev->link_state = NULL; |
@@ -570,10 +524,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
570 | return 0; | 524 | return 0; |
571 | } | 525 | } |
572 | 526 | ||
573 | static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | 527 | static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) |
574 | { | 528 | { |
575 | struct pcie_link_state *link; | 529 | struct pcie_link_state *link; |
576 | int blacklist = !!pcie_aspm_sanity_check(pdev); | ||
577 | 530 | ||
578 | link = kzalloc(sizeof(*link), GFP_KERNEL); | 531 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
579 | if (!link) | 532 | if (!link) |
@@ -599,15 +552,7 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | |||
599 | link->root = link->parent->root; | 552 | link->root = link->parent->root; |
600 | 553 | ||
601 | list_add(&link->sibling, &link_list); | 554 | list_add(&link->sibling, &link_list); |
602 | |||
603 | pdev->link_state = link; | 555 | pdev->link_state = link; |
604 | |||
605 | /* Check ASPM capability */ | ||
606 | pcie_aspm_cap_init(link, blacklist); | ||
607 | |||
608 | /* Check Clock PM capability */ | ||
609 | pcie_clkpm_cap_init(link, blacklist); | ||
610 | |||
611 | return link; | 556 | return link; |
612 | } | 557 | } |
613 | 558 | ||
@@ -618,8 +563,8 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | |||
618 | */ | 563 | */ |
619 | void pcie_aspm_init_link_state(struct pci_dev *pdev) | 564 | void pcie_aspm_init_link_state(struct pci_dev *pdev) |
620 | { | 565 | { |
621 | u32 state; | ||
622 | struct pcie_link_state *link; | 566 | struct pcie_link_state *link; |
567 | int blacklist = !!pcie_aspm_sanity_check(pdev); | ||
623 | 568 | ||
624 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) | 569 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) |
625 | return; | 570 | return; |
@@ -637,47 +582,64 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
637 | goto out; | 582 | goto out; |
638 | 583 | ||
639 | mutex_lock(&aspm_lock); | 584 | mutex_lock(&aspm_lock); |
640 | link = pcie_aspm_setup_link_state(pdev); | 585 | link = alloc_pcie_link_state(pdev); |
641 | if (!link) | 586 | if (!link) |
642 | goto unlock; | 587 | goto unlock; |
643 | /* | 588 | /* |
644 | * Setup initial ASPM state | 589 | * Setup initial ASPM state. Note that we need to configure |
645 | * | 590 | * upstream links also because capable state of them can be |
646 | * If link has switch, delay the link config. The leaf link | 591 | * update through pcie_aspm_cap_init(). |
647 | * initialization will config the whole hierarchy. But we must | ||
648 | * make sure BIOS doesn't set unsupported link state. | ||
649 | */ | 592 | */ |
650 | if (pcie_aspm_downstream_has_switch(link)) { | 593 | pcie_aspm_cap_init(link, blacklist); |
651 | state = pcie_aspm_check_state(link, link->aspm_default); | 594 | pcie_config_aspm_path(link); |
652 | __pcie_aspm_config_link(link, state); | ||
653 | } else { | ||
654 | state = policy_to_aspm_state(link); | ||
655 | __pcie_aspm_configure_link_state(link, state); | ||
656 | } | ||
657 | 595 | ||
658 | /* Setup initial Clock PM state */ | 596 | /* Setup initial Clock PM state */ |
659 | state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0; | 597 | pcie_clkpm_cap_init(link, blacklist); |
660 | pcie_set_clkpm(link, state); | 598 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
661 | unlock: | 599 | unlock: |
662 | mutex_unlock(&aspm_lock); | 600 | mutex_unlock(&aspm_lock); |
663 | out: | 601 | out: |
664 | up_read(&pci_bus_sem); | 602 | up_read(&pci_bus_sem); |
665 | } | 603 | } |
666 | 604 | ||
605 | /* Recheck latencies and update aspm_capable for links under the root */ | ||
606 | static void pcie_update_aspm_capable(struct pcie_link_state *root) | ||
607 | { | ||
608 | struct pcie_link_state *link; | ||
609 | BUG_ON(root->parent); | ||
610 | list_for_each_entry(link, &link_list, sibling) { | ||
611 | if (link->root != root) | ||
612 | continue; | ||
613 | link->aspm_capable = link->aspm_support; | ||
614 | } | ||
615 | list_for_each_entry(link, &link_list, sibling) { | ||
616 | struct pci_dev *child; | ||
617 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
618 | if (link->root != root) | ||
619 | continue; | ||
620 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
621 | if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) && | ||
622 | (child->pcie_type != PCI_EXP_TYPE_LEG_END)) | ||
623 | continue; | ||
624 | pcie_aspm_check_latency(child); | ||
625 | } | ||
626 | } | ||
627 | } | ||
628 | |||
667 | /* @pdev: the endpoint device */ | 629 | /* @pdev: the endpoint device */ |
668 | void pcie_aspm_exit_link_state(struct pci_dev *pdev) | 630 | void pcie_aspm_exit_link_state(struct pci_dev *pdev) |
669 | { | 631 | { |
670 | struct pci_dev *parent = pdev->bus->self; | 632 | struct pci_dev *parent = pdev->bus->self; |
671 | struct pcie_link_state *link_state = parent->link_state; | 633 | struct pcie_link_state *link, *root, *parent_link; |
672 | 634 | ||
673 | if (aspm_disabled || !pdev->is_pcie || !parent || !link_state) | 635 | if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state) |
674 | return; | 636 | return; |
675 | if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 637 | if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
676 | parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 638 | (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) |
677 | return; | 639 | return; |
640 | |||
678 | down_read(&pci_bus_sem); | 641 | down_read(&pci_bus_sem); |
679 | mutex_lock(&aspm_lock); | 642 | mutex_lock(&aspm_lock); |
680 | |||
681 | /* | 643 | /* |
682 | * All PCIe functions are in one slot, remove one function will remove | 644 | * All PCIe functions are in one slot, remove one function will remove |
683 | * the whole slot, so just wait until we are the last function left. | 645 | * the whole slot, so just wait until we are the last function left. |
@@ -685,13 +647,20 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
685 | if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) | 647 | if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) |
686 | goto out; | 648 | goto out; |
687 | 649 | ||
650 | link = parent->link_state; | ||
651 | root = link->root; | ||
652 | parent_link = link->parent; | ||
653 | |||
688 | /* All functions are removed, so just disable ASPM for the link */ | 654 | /* All functions are removed, so just disable ASPM for the link */ |
689 | __pcie_aspm_config_one_dev(parent, 0); | 655 | pcie_config_aspm_link(link, 0); |
690 | list_del(&link_state->sibling); | 656 | list_del(&link->sibling); |
691 | list_del(&link_state->link); | 657 | list_del(&link->link); |
692 | /* Clock PM is for endpoint device */ | 658 | /* Clock PM is for endpoint device */ |
659 | free_link_state(link); | ||
693 | 660 | ||
694 | free_link_state(link_state); | 661 | /* Recheck latencies and configure upstream links */ |
662 | pcie_update_aspm_capable(root); | ||
663 | pcie_config_aspm_path(parent_link); | ||
695 | out: | 664 | out: |
696 | mutex_unlock(&aspm_lock); | 665 | mutex_unlock(&aspm_lock); |
697 | up_read(&pci_bus_sem); | 666 | up_read(&pci_bus_sem); |
@@ -700,18 +669,23 @@ out: | |||
700 | /* @pdev: the root port or switch downstream port */ | 669 | /* @pdev: the root port or switch downstream port */ |
701 | void pcie_aspm_pm_state_change(struct pci_dev *pdev) | 670 | void pcie_aspm_pm_state_change(struct pci_dev *pdev) |
702 | { | 671 | { |
703 | struct pcie_link_state *link_state = pdev->link_state; | 672 | struct pcie_link_state *link = pdev->link_state; |
704 | 673 | ||
705 | if (aspm_disabled || !pdev->is_pcie || !pdev->link_state) | 674 | if (aspm_disabled || !pdev->is_pcie || !link) |
706 | return; | 675 | return; |
707 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 676 | if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
708 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 677 | (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) |
709 | return; | 678 | return; |
710 | /* | 679 | /* |
711 | * devices changed PM state, we should recheck if latency meets all | 680 | * Devices changed PM state, we should recheck if latency |
712 | * functions' requirement | 681 | * meets all functions' requirement |
713 | */ | 682 | */ |
714 | pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); | 683 | down_read(&pci_bus_sem); |
684 | mutex_lock(&aspm_lock); | ||
685 | pcie_update_aspm_capable(link->root); | ||
686 | pcie_config_aspm_path(link); | ||
687 | mutex_unlock(&aspm_lock); | ||
688 | up_read(&pci_bus_sem); | ||
715 | } | 689 | } |
716 | 690 | ||
717 | /* | 691 | /* |
@@ -721,7 +695,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) | |||
721 | void pci_disable_link_state(struct pci_dev *pdev, int state) | 695 | void pci_disable_link_state(struct pci_dev *pdev, int state) |
722 | { | 696 | { |
723 | struct pci_dev *parent = pdev->bus->self; | 697 | struct pci_dev *parent = pdev->bus->self; |
724 | struct pcie_link_state *link_state; | 698 | struct pcie_link_state *link; |
725 | 699 | ||
726 | if (aspm_disabled || !pdev->is_pcie) | 700 | if (aspm_disabled || !pdev->is_pcie) |
727 | return; | 701 | return; |
@@ -733,12 +707,16 @@ void pci_disable_link_state(struct pci_dev *pdev, int state) | |||
733 | 707 | ||
734 | down_read(&pci_bus_sem); | 708 | down_read(&pci_bus_sem); |
735 | mutex_lock(&aspm_lock); | 709 | mutex_lock(&aspm_lock); |
736 | link_state = parent->link_state; | 710 | link = parent->link_state; |
737 | link_state->aspm_support &= ~state; | 711 | if (state & PCIE_LINK_STATE_L0S) |
738 | __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); | 712 | link->aspm_disable |= ASPM_STATE_L0S; |
713 | if (state & PCIE_LINK_STATE_L1) | ||
714 | link->aspm_disable |= ASPM_STATE_L1; | ||
715 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); | ||
716 | |||
739 | if (state & PCIE_LINK_STATE_CLKPM) { | 717 | if (state & PCIE_LINK_STATE_CLKPM) { |
740 | link_state->clkpm_capable = 0; | 718 | link->clkpm_capable = 0; |
741 | pcie_set_clkpm(link_state, 0); | 719 | pcie_set_clkpm(link, 0); |
742 | } | 720 | } |
743 | mutex_unlock(&aspm_lock); | 721 | mutex_unlock(&aspm_lock); |
744 | up_read(&pci_bus_sem); | 722 | up_read(&pci_bus_sem); |
@@ -748,7 +726,7 @@ EXPORT_SYMBOL(pci_disable_link_state); | |||
748 | static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | 726 | static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) |
749 | { | 727 | { |
750 | int i; | 728 | int i; |
751 | struct pcie_link_state *link_state; | 729 | struct pcie_link_state *link; |
752 | 730 | ||
753 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) | 731 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
754 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) | 732 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) |
@@ -761,10 +739,9 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | |||
761 | down_read(&pci_bus_sem); | 739 | down_read(&pci_bus_sem); |
762 | mutex_lock(&aspm_lock); | 740 | mutex_lock(&aspm_lock); |
763 | aspm_policy = i; | 741 | aspm_policy = i; |
764 | list_for_each_entry(link_state, &link_list, sibling) { | 742 | list_for_each_entry(link, &link_list, sibling) { |
765 | __pcie_aspm_configure_link_state(link_state, | 743 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); |
766 | policy_to_aspm_state(link_state)); | 744 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
767 | pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state)); | ||
768 | } | 745 | } |
769 | mutex_unlock(&aspm_lock); | 746 | mutex_unlock(&aspm_lock); |
770 | up_read(&pci_bus_sem); | 747 | up_read(&pci_bus_sem); |
@@ -802,18 +779,28 @@ static ssize_t link_state_store(struct device *dev, | |||
802 | size_t n) | 779 | size_t n) |
803 | { | 780 | { |
804 | struct pci_dev *pdev = to_pci_dev(dev); | 781 | struct pci_dev *pdev = to_pci_dev(dev); |
805 | int state; | 782 | struct pcie_link_state *link, *root = pdev->link_state->root; |
783 | u32 val = buf[0] - '0', state = 0; | ||
806 | 784 | ||
807 | if (n < 1) | 785 | if (n < 1 || val > 3) |
808 | return -EINVAL; | 786 | return -EINVAL; |
809 | state = buf[0]-'0'; | ||
810 | if (state >= 0 && state <= 3) { | ||
811 | /* setup link aspm state */ | ||
812 | pcie_aspm_configure_link_state(pdev->link_state, state); | ||
813 | return n; | ||
814 | } | ||
815 | 787 | ||
816 | return -EINVAL; | 788 | /* Convert requested state to ASPM state */ |
789 | if (val & PCIE_LINK_STATE_L0S) | ||
790 | state |= ASPM_STATE_L0S; | ||
791 | if (val & PCIE_LINK_STATE_L1) | ||
792 | state |= ASPM_STATE_L1; | ||
793 | |||
794 | down_read(&pci_bus_sem); | ||
795 | mutex_lock(&aspm_lock); | ||
796 | list_for_each_entry(link, &link_list, sibling) { | ||
797 | if (link->root != root) | ||
798 | continue; | ||
799 | pcie_config_aspm_link(link, state); | ||
800 | } | ||
801 | mutex_unlock(&aspm_lock); | ||
802 | up_read(&pci_bus_sem); | ||
803 | return n; | ||
817 | } | 804 | } |
818 | 805 | ||
819 | static ssize_t clk_ctl_show(struct device *dev, | 806 | static ssize_t clk_ctl_show(struct device *dev, |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 13ffdc35ea0e..52f84fca9f7d 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -187,14 +187,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
187 | */ | 187 | */ |
188 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | 188 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) |
189 | { | 189 | { |
190 | struct pcie_port_data *port_data = pci_get_drvdata(dev); | ||
191 | int irq, interrupt_mode = PCIE_PORT_NO_IRQ; | 190 | int irq, interrupt_mode = PCIE_PORT_NO_IRQ; |
192 | int i; | 191 | int i; |
193 | 192 | ||
194 | /* Check MSI quirk */ | ||
195 | if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk) | ||
196 | goto Fallback; | ||
197 | |||
198 | /* Try to use MSI-X if supported */ | 193 | /* Try to use MSI-X if supported */ |
199 | if (!pcie_port_enable_msix(dev, vectors, mask)) | 194 | if (!pcie_port_enable_msix(dev, vectors, mask)) |
200 | return PCIE_PORT_MSIX_MODE; | 195 | return PCIE_PORT_MSIX_MODE; |
@@ -203,7 +198,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | |||
203 | if (!pci_enable_msi(dev)) | 198 | if (!pci_enable_msi(dev)) |
204 | interrupt_mode = PCIE_PORT_MSI_MODE; | 199 | interrupt_mode = PCIE_PORT_MSI_MODE; |
205 | 200 | ||
206 | Fallback: | ||
207 | if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) | 201 | if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) |
208 | interrupt_mode = PCIE_PORT_INTx_MODE; | 202 | interrupt_mode = PCIE_PORT_INTx_MODE; |
209 | 203 | ||
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 091ce70051e0..6df5c984a791 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -205,6 +205,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) | |||
205 | 205 | ||
206 | /* If fatal, restore cfg space for possible link reset at upstream */ | 206 | /* If fatal, restore cfg space for possible link reset at upstream */ |
207 | if (dev->error_state == pci_channel_io_frozen) { | 207 | if (dev->error_state == pci_channel_io_frozen) { |
208 | dev->state_saved = true; | ||
208 | pci_restore_state(dev); | 209 | pci_restore_state(dev); |
209 | pcie_portdrv_restore_config(dev); | 210 | pcie_portdrv_restore_config(dev); |
210 | pci_enable_pcie_error_reporting(dev); | 211 | pci_enable_pcie_error_reporting(dev); |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 40e75f6a5056..8105e32117f6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -235,7 +235,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
235 | res->start = l64; | 235 | res->start = l64; |
236 | res->end = l64 + sz64; | 236 | res->end = l64 + sz64; |
237 | dev_printk(KERN_DEBUG, &dev->dev, | 237 | dev_printk(KERN_DEBUG, &dev->dev, |
238 | "reg %x 64bit mmio: %pR\n", pos, res); | 238 | "reg %x %s: %pR\n", pos, |
239 | (res->flags & IORESOURCE_PREFETCH) ? | ||
240 | "64bit mmio pref" : "64bit mmio", | ||
241 | res); | ||
239 | } | 242 | } |
240 | 243 | ||
241 | res->flags |= IORESOURCE_MEM_64; | 244 | res->flags |= IORESOURCE_MEM_64; |
@@ -249,7 +252,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
249 | res->end = l + sz; | 252 | res->end = l + sz; |
250 | 253 | ||
251 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, | 254 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, |
252 | (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio", | 255 | (res->flags & IORESOURCE_IO) ? "io port" : |
256 | ((res->flags & IORESOURCE_PREFETCH) ? | ||
257 | "32bit mmio pref" : "32bit mmio"), | ||
253 | res); | 258 | res); |
254 | } | 259 | } |
255 | 260 | ||
@@ -692,6 +697,23 @@ static void set_pcie_port_type(struct pci_dev *pdev) | |||
692 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 697 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
693 | } | 698 | } |
694 | 699 | ||
700 | static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | ||
701 | { | ||
702 | int pos; | ||
703 | u16 reg16; | ||
704 | u32 reg32; | ||
705 | |||
706 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
707 | if (!pos) | ||
708 | return; | ||
709 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | ||
710 | if (!(reg16 & PCI_EXP_FLAGS_SLOT)) | ||
711 | return; | ||
712 | pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, ®32); | ||
713 | if (reg32 & PCI_EXP_SLTCAP_HPC) | ||
714 | pdev->is_hotplug_bridge = 1; | ||
715 | } | ||
716 | |||
695 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 717 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
696 | 718 | ||
697 | /** | 719 | /** |
@@ -799,6 +821,7 @@ int pci_setup_device(struct pci_dev *dev) | |||
799 | pci_read_irq(dev); | 821 | pci_read_irq(dev); |
800 | dev->transparent = ((dev->class & 0xff) == 1); | 822 | dev->transparent = ((dev->class & 0xff) == 1); |
801 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); | 823 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
824 | set_pcie_hotplug_bridge(dev); | ||
802 | break; | 825 | break; |
803 | 826 | ||
804 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ | 827 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
@@ -1009,6 +1032,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
1009 | /* Fix up broken headers */ | 1032 | /* Fix up broken headers */ |
1010 | pci_fixup_device(pci_fixup_header, dev); | 1033 | pci_fixup_device(pci_fixup_header, dev); |
1011 | 1034 | ||
1035 | /* Clear the state_saved flag. */ | ||
1036 | dev->state_saved = false; | ||
1037 | |||
1012 | /* Initialize various capabilities */ | 1038 | /* Initialize various capabilities */ |
1013 | pci_init_capabilities(dev); | 1039 | pci_init_capabilities(dev); |
1014 | 1040 | ||
@@ -1061,8 +1087,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1061 | if (dev && !dev->is_added) /* new device? */ | 1087 | if (dev && !dev->is_added) /* new device? */ |
1062 | nr++; | 1088 | nr++; |
1063 | 1089 | ||
1064 | if ((dev && dev->multifunction) || | 1090 | if (dev && dev->multifunction) { |
1065 | (!dev && pcibios_scan_all_fns(bus, devfn))) { | ||
1066 | for (fn = 1; fn < 8; fn++) { | 1091 | for (fn = 1; fn < 8; fn++) { |
1067 | dev = pci_scan_single_device(bus, devfn + fn); | 1092 | dev = pci_scan_single_device(bus, devfn + fn); |
1068 | if (dev) { | 1093 | if (dev) { |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 85ce23997be4..6099facecd79 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -31,8 +31,6 @@ int isa_dma_bridge_buggy; | |||
31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | 31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); |
32 | int pci_pci_problems; | 32 | int pci_pci_problems; |
33 | EXPORT_SYMBOL(pci_pci_problems); | 33 | EXPORT_SYMBOL(pci_pci_problems); |
34 | int pcie_mch_quirk; | ||
35 | EXPORT_SYMBOL(pcie_mch_quirk); | ||
36 | 34 | ||
37 | #ifdef CONFIG_PCI_QUIRKS | 35 | #ifdef CONFIG_PCI_QUIRKS |
38 | /* | 36 | /* |
@@ -1203,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) | |||
1203 | switch(dev->subsystem_device) { | 1201 | switch(dev->subsystem_device) { |
1204 | case 0x00b8: /* Compaq Evo D510 CMT */ | 1202 | case 0x00b8: /* Compaq Evo D510 CMT */ |
1205 | case 0x00b9: /* Compaq Evo D510 SFF */ | 1203 | case 0x00b9: /* Compaq Evo D510 SFF */ |
1204 | case 0x00ba: /* Compaq Evo D510 USDT */ | ||
1206 | /* Motherboard doesn't have Host bridge | 1205 | /* Motherboard doesn't have Host bridge |
1207 | * subvendor/subdevice IDs and on-board VGA | 1206 | * subvendor/subdevice IDs and on-board VGA |
1208 | * controller is disabled if an AGP card is | 1207 | * controller is disabled if an AGP card is |
@@ -1501,7 +1500,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a | |||
1501 | 1500 | ||
1502 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) | 1501 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) |
1503 | { | 1502 | { |
1504 | pcie_mch_quirk = 1; | 1503 | pci_msi_off(pdev); |
1504 | pdev->no_msi = 1; | ||
1505 | } | 1505 | } |
1506 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); | 1506 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); |
1507 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); | 1507 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); |
@@ -1569,10 +1569,8 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) | |||
1569 | return; | 1569 | return; |
1570 | 1570 | ||
1571 | dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; | 1571 | dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; |
1572 | 1572 | dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n", | |
1573 | printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", | 1573 | dev->vendor, dev->device); |
1574 | dev->vendor, dev->device); | ||
1575 | return; | ||
1576 | } | 1574 | } |
1577 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); | 1575 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); |
1578 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); | 1576 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); |
@@ -1614,8 +1612,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) | |||
1614 | pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; | 1612 | pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; |
1615 | pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); | 1613 | pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); |
1616 | 1614 | ||
1617 | printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", | 1615 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1618 | dev->vendor, dev->device); | 1616 | dev->vendor, dev->device); |
1619 | } | 1617 | } |
1620 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | 1618 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); |
1621 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | 1619 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); |
@@ -1647,8 +1645,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) | |||
1647 | 1645 | ||
1648 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); | 1646 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); |
1649 | 1647 | ||
1650 | printk(KERN_INFO "disabled boot interrupts on PCI device" | 1648 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1651 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | 1649 | dev->vendor, dev->device); |
1652 | } | 1650 | } |
1653 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | 1651 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); |
1654 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | 1652 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); |
@@ -1678,8 +1676,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | |||
1678 | pci_config_dword &= ~AMD_813X_NOIOAMODE; | 1676 | pci_config_dword &= ~AMD_813X_NOIOAMODE; |
1679 | pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); | 1677 | pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); |
1680 | 1678 | ||
1681 | printk(KERN_INFO "disabled boot interrupts on PCI device " | 1679 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1682 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | 1680 | dev->vendor, dev->device); |
1683 | } | 1681 | } |
1684 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | 1682 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); |
1685 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | 1683 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); |
@@ -1695,14 +1693,13 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) | |||
1695 | 1693 | ||
1696 | pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); | 1694 | pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); |
1697 | if (!pci_config_word) { | 1695 | if (!pci_config_word) { |
1698 | printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " | 1696 | dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] " |
1699 | "already disabled\n", | 1697 | "already disabled\n", dev->vendor, dev->device); |
1700 | dev->vendor, dev->device); | ||
1701 | return; | 1698 | return; |
1702 | } | 1699 | } |
1703 | pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); | 1700 | pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); |
1704 | printk(KERN_INFO "disabled boot interrupts on PCI device " | 1701 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1705 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | 1702 | dev->vendor, dev->device); |
1706 | } | 1703 | } |
1707 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | 1704 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); |
1708 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | 1705 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); |
@@ -2384,8 +2381,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev) | |||
2384 | } | 2381 | } |
2385 | 2382 | ||
2386 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); | 2383 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); |
2384 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); | ||
2387 | 2385 | ||
2388 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); | 2386 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); |
2387 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); | ||
2389 | 2388 | ||
2390 | static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) | 2389 | static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) |
2391 | { | 2390 | { |
@@ -2494,6 +2493,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); | |||
2494 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); | 2493 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); |
2495 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); | 2494 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); |
2496 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); | 2495 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); |
2496 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); | ||
2497 | 2497 | ||
2498 | #endif /* CONFIG_PCI_IOV */ | 2498 | #endif /* CONFIG_PCI_IOV */ |
2499 | 2499 | ||
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index e8cb5051c311..ec415352d9ba 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -113,37 +113,6 @@ pci_find_next_bus(const struct pci_bus *from) | |||
113 | return b; | 113 | return b; |
114 | } | 114 | } |
115 | 115 | ||
116 | #ifdef CONFIG_PCI_LEGACY | ||
117 | /** | ||
118 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
119 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
120 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
121 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
122 | * | ||
123 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
124 | * with a matching @vendor and @device, a pointer to its device structure is | ||
125 | * returned. Otherwise, %NULL is returned. | ||
126 | * A new search is initiated by passing %NULL as the @from argument. | ||
127 | * Otherwise if @from is not %NULL, searches continue from next device | ||
128 | * on the global list. | ||
129 | * | ||
130 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
131 | * the PCI device returned by this function can disappear at any moment in | ||
132 | * time. | ||
133 | */ | ||
134 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
135 | struct pci_dev *from) | ||
136 | { | ||
137 | struct pci_dev *pdev; | ||
138 | |||
139 | pci_dev_get(from); | ||
140 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
141 | pci_dev_put(pdev); | ||
142 | return pdev; | ||
143 | } | ||
144 | EXPORT_SYMBOL(pci_find_device); | ||
145 | #endif /* CONFIG_PCI_LEGACY */ | ||
146 | |||
147 | /** | 116 | /** |
148 | * pci_get_slot - locate PCI device for a given PCI slot | 117 | * pci_get_slot - locate PCI device for a given PCI slot |
149 | * @bus: PCI bus on which desired PCI device resides | 118 | * @bus: PCI bus on which desired PCI device resides |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 7c443b4583ab..cb1a027eb552 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -309,7 +309,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon | |||
309 | since these windows have 4K granularity and the IO ranges | 309 | since these windows have 4K granularity and the IO ranges |
310 | of non-bridge PCI devices are limited to 256 bytes. | 310 | of non-bridge PCI devices are limited to 256 bytes. |
311 | We must be careful with the ISA aliasing though. */ | 311 | We must be careful with the ISA aliasing though. */ |
312 | static void pbus_size_io(struct pci_bus *bus) | 312 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) |
313 | { | 313 | { |
314 | struct pci_dev *dev; | 314 | struct pci_dev *dev; |
315 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 315 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
@@ -336,6 +336,8 @@ static void pbus_size_io(struct pci_bus *bus) | |||
336 | size1 += r_size; | 336 | size1 += r_size; |
337 | } | 337 | } |
338 | } | 338 | } |
339 | if (size < min_size) | ||
340 | size = min_size; | ||
339 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | 341 | /* To be fixed in 2.5: we should have sort of HAVE_ISA |
340 | flag in the struct pci_bus. */ | 342 | flag in the struct pci_bus. */ |
341 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 343 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
@@ -354,7 +356,8 @@ static void pbus_size_io(struct pci_bus *bus) | |||
354 | 356 | ||
355 | /* Calculate the size of the bus and minimal alignment which | 357 | /* Calculate the size of the bus and minimal alignment which |
356 | guarantees that all child resources fit in this size. */ | 358 | guarantees that all child resources fit in this size. */ |
357 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type) | 359 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
360 | unsigned long type, resource_size_t min_size) | ||
358 | { | 361 | { |
359 | struct pci_dev *dev; | 362 | struct pci_dev *dev; |
360 | resource_size_t min_align, align, size; | 363 | resource_size_t min_align, align, size; |
@@ -404,6 +407,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long | |||
404 | mem64_mask &= r->flags & IORESOURCE_MEM_64; | 407 | mem64_mask &= r->flags & IORESOURCE_MEM_64; |
405 | } | 408 | } |
406 | } | 409 | } |
410 | if (size < min_size) | ||
411 | size = min_size; | ||
407 | 412 | ||
408 | align = 0; | 413 | align = 0; |
409 | min_align = 0; | 414 | min_align = 0; |
@@ -483,6 +488,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
483 | { | 488 | { |
484 | struct pci_dev *dev; | 489 | struct pci_dev *dev; |
485 | unsigned long mask, prefmask; | 490 | unsigned long mask, prefmask; |
491 | resource_size_t min_mem_size = 0, min_io_size = 0; | ||
486 | 492 | ||
487 | list_for_each_entry(dev, &bus->devices, bus_list) { | 493 | list_for_each_entry(dev, &bus->devices, bus_list) { |
488 | struct pci_bus *b = dev->subordinate; | 494 | struct pci_bus *b = dev->subordinate; |
@@ -512,8 +518,12 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
512 | 518 | ||
513 | case PCI_CLASS_BRIDGE_PCI: | 519 | case PCI_CLASS_BRIDGE_PCI: |
514 | pci_bridge_check_ranges(bus); | 520 | pci_bridge_check_ranges(bus); |
521 | if (bus->self->is_hotplug_bridge) { | ||
522 | min_io_size = pci_hotplug_io_size; | ||
523 | min_mem_size = pci_hotplug_mem_size; | ||
524 | } | ||
515 | default: | 525 | default: |
516 | pbus_size_io(bus); | 526 | pbus_size_io(bus, min_io_size); |
517 | /* If the bridge supports prefetchable range, size it | 527 | /* If the bridge supports prefetchable range, size it |
518 | separately. If it doesn't, or its prefetchable window | 528 | separately. If it doesn't, or its prefetchable window |
519 | has already been allocated by arch code, try | 529 | has already been allocated by arch code, try |
@@ -521,9 +531,11 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
521 | resources. */ | 531 | resources. */ |
522 | mask = IORESOURCE_MEM; | 532 | mask = IORESOURCE_MEM; |
523 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; | 533 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; |
524 | if (pbus_size_mem(bus, prefmask, prefmask)) | 534 | if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size)) |
525 | mask = prefmask; /* Success, size non-prefetch only. */ | 535 | mask = prefmask; /* Success, size non-prefetch only. */ |
526 | pbus_size_mem(bus, mask, IORESOURCE_MEM); | 536 | else |
537 | min_mem_size += min_mem_size; | ||
538 | pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size); | ||
527 | break; | 539 | break; |
528 | } | 540 | } |
529 | } | 541 | } |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 88cdd1a937d6..706f82d8111f 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource) | |||
119 | 119 | ||
120 | return err; | 120 | return err; |
121 | } | 121 | } |
122 | EXPORT_SYMBOL(pci_claim_resource); | ||
122 | 123 | ||
123 | #ifdef CONFIG_PCI_QUIRKS | 124 | #ifdef CONFIG_PCI_QUIRKS |
124 | void pci_disable_bridge_window(struct pci_dev *dev) | 125 | void pci_disable_bridge_window(struct pci_dev *dev) |
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 3ecd7c99d8eb..737fe5d87c40 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c | |||
@@ -622,11 +622,12 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res, | |||
622 | 622 | ||
623 | static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end) | 623 | static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end) |
624 | { | 624 | { |
625 | struct resource *root, *res; | 625 | struct pci_dev *dev = socket->dev; |
626 | struct resource *res; | ||
626 | struct pci_bus_region region; | 627 | struct pci_bus_region region; |
627 | unsigned mask; | 628 | unsigned mask; |
628 | 629 | ||
629 | res = socket->dev->resource + PCI_BRIDGE_RESOURCES + nr; | 630 | res = dev->resource + PCI_BRIDGE_RESOURCES + nr; |
630 | /* Already allocated? */ | 631 | /* Already allocated? */ |
631 | if (res->parent) | 632 | if (res->parent) |
632 | return 0; | 633 | return 0; |
@@ -636,17 +637,16 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type | |||
636 | if (type & IORESOURCE_IO) | 637 | if (type & IORESOURCE_IO) |
637 | mask = ~3; | 638 | mask = ~3; |
638 | 639 | ||
639 | res->name = socket->dev->subordinate->name; | 640 | res->name = dev->subordinate->name; |
640 | res->flags = type; | 641 | res->flags = type; |
641 | 642 | ||
642 | region.start = config_readl(socket, addr_start) & mask; | 643 | region.start = config_readl(socket, addr_start) & mask; |
643 | region.end = config_readl(socket, addr_end) | ~mask; | 644 | region.end = config_readl(socket, addr_end) | ~mask; |
644 | if (region.start && region.end > region.start && !override_bios) { | 645 | if (region.start && region.end > region.start && !override_bios) { |
645 | pcibios_bus_to_resource(socket->dev, res, ®ion); | 646 | pcibios_bus_to_resource(dev, res, ®ion); |
646 | root = pci_find_parent_resource(socket->dev, res); | 647 | if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0) |
647 | if (root && (request_resource(root, res) == 0)) | ||
648 | return 0; | 648 | return 0; |
649 | dev_printk(KERN_INFO, &socket->dev->dev, | 649 | dev_printk(KERN_INFO, &dev->dev, |
650 | "Preassigned resource %d busy or not available, " | 650 | "Preassigned resource %d busy or not available, " |
651 | "reconfiguring...\n", | 651 | "reconfiguring...\n", |
652 | nr); | 652 | nr); |
@@ -672,7 +672,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type | |||
672 | return 1; | 672 | return 1; |
673 | } | 673 | } |
674 | 674 | ||
675 | dev_printk(KERN_INFO, &socket->dev->dev, | 675 | dev_printk(KERN_INFO, &dev->dev, |
676 | "no resource of type %x available, trying to continue...\n", | 676 | "no resource of type %x available, trying to continue...\n", |
677 | type); | 677 | type); |
678 | res->start = res->end = res->flags = 0; | 678 | res->start = res->end = res->flags = 0; |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 1999b1834814..cef3e1d9b92e 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -7,6 +7,8 @@ menu "Graphics support" | |||
7 | 7 | ||
8 | source "drivers/char/agp/Kconfig" | 8 | source "drivers/char/agp/Kconfig" |
9 | 9 | ||
10 | source "drivers/gpu/vga/Kconfig" | ||
11 | |||
10 | source "drivers/gpu/drm/Kconfig" | 12 | source "drivers/gpu/drm/Kconfig" |
11 | 13 | ||
12 | config VGASTATE | 14 | config VGASTATE |