diff options
author | Michal Marek <mmarek@suse.cz> | 2011-03-09 10:15:44 -0500 |
---|---|---|
committer | Michal Marek <mmarek@suse.cz> | 2011-03-09 10:15:44 -0500 |
commit | 2d8ad8719591fa803b0d589ed057fa46f49b7155 (patch) | |
tree | 4ae051577dad1161c91dafbf4207bb10a9dc91bb /drivers/pci | |
parent | 9b4ce7bce5f30712fd926ab4599a803314a07719 (diff) | |
parent | c56eb8fb6dccb83d9fe62fd4dc00c834de9bc470 (diff) |
Merge commit 'v2.6.38-rc1' into kbuild/packaging
Diffstat (limited to 'drivers/pci')
75 files changed, 5368 insertions, 1879 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b1ecefa2a23d..a9523fdc6911 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -19,18 +19,7 @@ config PCI_MSI | |||
19 | by using the 'pci=nomsi' option. This disables MSI for the | 19 | by using the 'pci=nomsi' option. This disables MSI for the |
20 | entire system. | 20 | entire system. |
21 | 21 | ||
22 | If you don't know what to do here, say N. | 22 | If you don't know what to do here, say Y. |
23 | |||
24 | config PCI_LEGACY | ||
25 | bool "Enable deprecated pci_find_* API" | ||
26 | depends on PCI | ||
27 | default y | ||
28 | help | ||
29 | Say Y here if you want to include support for the deprecated | ||
30 | pci_find_device() API. Most drivers have been converted over | ||
31 | to using the proper hotplug APIs, so this option serves to | ||
32 | include/exclude only a few drivers that are still using this | ||
33 | API. | ||
34 | 23 | ||
35 | config PCI_DEBUG | 24 | config PCI_DEBUG |
36 | bool "PCI Debugging" | 25 | bool "PCI Debugging" |
@@ -51,6 +40,28 @@ config PCI_STUB | |||
51 | 40 | ||
52 | When in doubt, say N. | 41 | When in doubt, say N. |
53 | 42 | ||
43 | config XEN_PCIDEV_FRONTEND | ||
44 | tristate "Xen PCI Frontend" | ||
45 | depends on PCI && X86 && XEN | ||
46 | select HOTPLUG | ||
47 | select PCI_XEN | ||
48 | select XEN_XENBUS_FRONTEND | ||
49 | default y | ||
50 | help | ||
51 | The PCI device frontend driver allows the kernel to import arbitrary | ||
52 | PCI devices from a PCI backend to support PCI driver domains. | ||
53 | |||
54 | config XEN_PCIDEV_FE_DEBUG | ||
55 | bool "Xen PCI Frontend debugging" | ||
56 | depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG | ||
57 | help | ||
58 | Say Y here if you want the Xen PCI frontend to produce a bunch of debug | ||
59 | messages to the system log. Select this if you are having a | ||
60 | problem with Xen PCI frontend support and want to see more of what is | ||
61 | going on. | ||
62 | |||
63 | When in doubt, say N. | ||
64 | |||
54 | config HT_IRQ | 65 | config HT_IRQ |
55 | bool "Interrupts on hypertransport devices" | 66 | bool "Interrupts on hypertransport devices" |
56 | default y | 67 | default y |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 4df48d58eaa6..98e6fdf34d30 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -2,14 +2,13 @@ | |||
2 | # Makefile for the PCI bus specific drivers. | 2 | # Makefile for the PCI bus specific drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ | 5 | obj-y += access.o bus.o probe.o remove.o pci.o \ |
6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ | 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ |
7 | irq.o | 7 | irq.o vpd.o |
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
11 | obj-$(CONFIG_PCI_LEGACY) += legacy.o | 11 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o |
12 | CFLAGS_legacy.o += -Wno-deprecated-declarations | ||
13 | 12 | ||
14 | # Build PCI Express stuff if needed | 13 | # Build PCI Express stuff if needed |
15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 14 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
@@ -49,12 +48,17 @@ obj-$(CONFIG_PPC) += setup-bus.o | |||
49 | obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o | 48 | obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o |
50 | obj-$(CONFIG_X86_VISWS) += setup-irq.o | 49 | obj-$(CONFIG_X86_VISWS) += setup-irq.o |
51 | obj-$(CONFIG_MN10300) += setup-bus.o | 50 | obj-$(CONFIG_MN10300) += setup-bus.o |
51 | obj-$(CONFIG_MICROBLAZE) += setup-bus.o | ||
52 | obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o | ||
52 | 53 | ||
53 | # | 54 | # |
54 | # ACPI Related PCI FW Functions | 55 | # ACPI Related PCI FW Functions |
55 | # | 56 | # |
56 | obj-$(CONFIG_ACPI) += pci-acpi.o | 57 | obj-$(CONFIG_ACPI) += pci-acpi.o |
57 | 58 | ||
59 | # SMBIOS provided firmware instance and labels | ||
60 | obj-$(CONFIG_DMI) += pci-label.o | ||
61 | |||
58 | # Cardbus & CompactPCI use setup-bus | 62 | # Cardbus & CompactPCI use setup-bus |
59 | obj-$(CONFIG_HOTPLUG) += setup-bus.o | 63 | obj-$(CONFIG_HOTPLUG) += setup-bus.o |
60 | 64 | ||
@@ -62,6 +66,6 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o | |||
62 | 66 | ||
63 | obj-$(CONFIG_PCI_STUB) += pci-stub.o | 67 | obj-$(CONFIG_PCI_STUB) += pci-stub.o |
64 | 68 | ||
65 | ifeq ($(CONFIG_PCI_DEBUG),y) | 69 | obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o |
66 | EXTRA_CFLAGS += -DDEBUG | 70 | |
67 | endif | 71 | ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG |
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index db23200c4874..531bc697d800 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/pci.h> | 2 | #include <linux/pci.h> |
3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/slab.h> | ||
5 | #include <linux/ioport.h> | 6 | #include <linux/ioport.h> |
6 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
7 | 8 | ||
@@ -12,7 +13,7 @@ | |||
12 | * configuration space. | 13 | * configuration space. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | static DEFINE_SPINLOCK(pci_lock); | 16 | static DEFINE_RAW_SPINLOCK(pci_lock); |
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Wrappers for all PCI configuration access functions. They just check | 19 | * Wrappers for all PCI configuration access functions. They just check |
@@ -32,10 +33,10 @@ int pci_bus_read_config_##size \ | |||
32 | unsigned long flags; \ | 33 | unsigned long flags; \ |
33 | u32 data = 0; \ | 34 | u32 data = 0; \ |
34 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ | 35 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ |
35 | spin_lock_irqsave(&pci_lock, flags); \ | 36 | raw_spin_lock_irqsave(&pci_lock, flags); \ |
36 | res = bus->ops->read(bus, devfn, pos, len, &data); \ | 37 | res = bus->ops->read(bus, devfn, pos, len, &data); \ |
37 | *value = (type)data; \ | 38 | *value = (type)data; \ |
38 | spin_unlock_irqrestore(&pci_lock, flags); \ | 39 | raw_spin_unlock_irqrestore(&pci_lock, flags); \ |
39 | return res; \ | 40 | return res; \ |
40 | } | 41 | } |
41 | 42 | ||
@@ -46,9 +47,9 @@ int pci_bus_write_config_##size \ | |||
46 | int res; \ | 47 | int res; \ |
47 | unsigned long flags; \ | 48 | unsigned long flags; \ |
48 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ | 49 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ |
49 | spin_lock_irqsave(&pci_lock, flags); \ | 50 | raw_spin_lock_irqsave(&pci_lock, flags); \ |
50 | res = bus->ops->write(bus, devfn, pos, len, value); \ | 51 | res = bus->ops->write(bus, devfn, pos, len, value); \ |
51 | spin_unlock_irqrestore(&pci_lock, flags); \ | 52 | raw_spin_unlock_irqrestore(&pci_lock, flags); \ |
52 | return res; \ | 53 | return res; \ |
53 | } | 54 | } |
54 | 55 | ||
@@ -78,10 +79,10 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops) | |||
78 | struct pci_ops *old_ops; | 79 | struct pci_ops *old_ops; |
79 | unsigned long flags; | 80 | unsigned long flags; |
80 | 81 | ||
81 | spin_lock_irqsave(&pci_lock, flags); | 82 | raw_spin_lock_irqsave(&pci_lock, flags); |
82 | old_ops = bus->ops; | 83 | old_ops = bus->ops; |
83 | bus->ops = ops; | 84 | bus->ops = ops; |
84 | spin_unlock_irqrestore(&pci_lock, flags); | 85 | raw_spin_unlock_irqrestore(&pci_lock, flags); |
85 | return old_ops; | 86 | return old_ops; |
86 | } | 87 | } |
87 | EXPORT_SYMBOL(pci_bus_set_ops); | 88 | EXPORT_SYMBOL(pci_bus_set_ops); |
@@ -135,9 +136,9 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev) | |||
135 | __add_wait_queue(&pci_ucfg_wait, &wait); | 136 | __add_wait_queue(&pci_ucfg_wait, &wait); |
136 | do { | 137 | do { |
137 | set_current_state(TASK_UNINTERRUPTIBLE); | 138 | set_current_state(TASK_UNINTERRUPTIBLE); |
138 | spin_unlock_irq(&pci_lock); | 139 | raw_spin_unlock_irq(&pci_lock); |
139 | schedule(); | 140 | schedule(); |
140 | spin_lock_irq(&pci_lock); | 141 | raw_spin_lock_irq(&pci_lock); |
141 | } while (dev->block_ucfg_access); | 142 | } while (dev->block_ucfg_access); |
142 | __remove_wait_queue(&pci_ucfg_wait, &wait); | 143 | __remove_wait_queue(&pci_ucfg_wait, &wait); |
143 | } | 144 | } |
@@ -149,11 +150,11 @@ int pci_user_read_config_##size \ | |||
149 | int ret = 0; \ | 150 | int ret = 0; \ |
150 | u32 data = -1; \ | 151 | u32 data = -1; \ |
151 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ | 152 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ |
152 | spin_lock_irq(&pci_lock); \ | 153 | raw_spin_lock_irq(&pci_lock); \ |
153 | if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ | 154 | if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ |
154 | ret = dev->bus->ops->read(dev->bus, dev->devfn, \ | 155 | ret = dev->bus->ops->read(dev->bus, dev->devfn, \ |
155 | pos, sizeof(type), &data); \ | 156 | pos, sizeof(type), &data); \ |
156 | spin_unlock_irq(&pci_lock); \ | 157 | raw_spin_unlock_irq(&pci_lock); \ |
157 | *val = (type)data; \ | 158 | *val = (type)data; \ |
158 | return ret; \ | 159 | return ret; \ |
159 | } | 160 | } |
@@ -164,11 +165,11 @@ int pci_user_write_config_##size \ | |||
164 | { \ | 165 | { \ |
165 | int ret = -EIO; \ | 166 | int ret = -EIO; \ |
166 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ | 167 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ |
167 | spin_lock_irq(&pci_lock); \ | 168 | raw_spin_lock_irq(&pci_lock); \ |
168 | if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ | 169 | if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ |
169 | ret = dev->bus->ops->write(dev->bus, dev->devfn, \ | 170 | ret = dev->bus->ops->write(dev->bus, dev->devfn, \ |
170 | pos, sizeof(type), val); \ | 171 | pos, sizeof(type), val); \ |
171 | spin_unlock_irq(&pci_lock); \ | 172 | raw_spin_unlock_irq(&pci_lock); \ |
172 | return ret; \ | 173 | return ret; \ |
173 | } | 174 | } |
174 | 175 | ||
@@ -219,8 +220,13 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev) | |||
219 | return 0; | 220 | return 0; |
220 | } | 221 | } |
221 | 222 | ||
222 | if (time_after(jiffies, timeout)) | 223 | if (time_after(jiffies, timeout)) { |
224 | dev_printk(KERN_DEBUG, &dev->dev, | ||
225 | "vpd r/w failed. This is likely a firmware " | ||
226 | "bug on this device. Contact the card " | ||
227 | "vendor for a firmware update."); | ||
223 | return -ETIMEDOUT; | 228 | return -ETIMEDOUT; |
229 | } | ||
224 | if (fatal_signal_pending(current)) | 230 | if (fatal_signal_pending(current)) |
225 | return -EINTR; | 231 | return -EINTR; |
226 | if (!cond_resched()) | 232 | if (!cond_resched()) |
@@ -395,10 +401,10 @@ void pci_block_user_cfg_access(struct pci_dev *dev) | |||
395 | unsigned long flags; | 401 | unsigned long flags; |
396 | int was_blocked; | 402 | int was_blocked; |
397 | 403 | ||
398 | spin_lock_irqsave(&pci_lock, flags); | 404 | raw_spin_lock_irqsave(&pci_lock, flags); |
399 | was_blocked = dev->block_ucfg_access; | 405 | was_blocked = dev->block_ucfg_access; |
400 | dev->block_ucfg_access = 1; | 406 | dev->block_ucfg_access = 1; |
401 | spin_unlock_irqrestore(&pci_lock, flags); | 407 | raw_spin_unlock_irqrestore(&pci_lock, flags); |
402 | 408 | ||
403 | /* If we BUG() inside the pci_lock, we're guaranteed to hose | 409 | /* If we BUG() inside the pci_lock, we're guaranteed to hose |
404 | * the machine */ | 410 | * the machine */ |
@@ -416,7 +422,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev) | |||
416 | { | 422 | { |
417 | unsigned long flags; | 423 | unsigned long flags; |
418 | 424 | ||
419 | spin_lock_irqsave(&pci_lock, flags); | 425 | raw_spin_lock_irqsave(&pci_lock, flags); |
420 | 426 | ||
421 | /* This indicates a problem in the caller, but we don't need | 427 | /* This indicates a problem in the caller, but we don't need |
422 | * to kill them, unlike a double-block above. */ | 428 | * to kill them, unlike a double-block above. */ |
@@ -424,6 +430,6 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev) | |||
424 | 430 | ||
425 | dev->block_ucfg_access = 0; | 431 | dev->block_ucfg_access = 0; |
426 | wake_up_all(&pci_ucfg_wait); | 432 | wake_up_all(&pci_ucfg_wait); |
427 | spin_unlock_irqrestore(&pci_lock, flags); | 433 | raw_spin_unlock_irqrestore(&pci_lock, flags); |
428 | } | 434 | } |
429 | EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access); | 435 | EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access); |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index cef28a79103f..69546e9213dd 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -14,9 +14,56 @@ | |||
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/slab.h> | ||
17 | 18 | ||
18 | #include "pci.h" | 19 | #include "pci.h" |
19 | 20 | ||
21 | void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, | ||
22 | unsigned int flags) | ||
23 | { | ||
24 | struct pci_bus_resource *bus_res; | ||
25 | |||
26 | bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL); | ||
27 | if (!bus_res) { | ||
28 | dev_err(&bus->dev, "can't add %pR resource\n", res); | ||
29 | return; | ||
30 | } | ||
31 | |||
32 | bus_res->res = res; | ||
33 | bus_res->flags = flags; | ||
34 | list_add_tail(&bus_res->list, &bus->resources); | ||
35 | } | ||
36 | |||
37 | struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) | ||
38 | { | ||
39 | struct pci_bus_resource *bus_res; | ||
40 | |||
41 | if (n < PCI_BRIDGE_RESOURCE_NUM) | ||
42 | return bus->resource[n]; | ||
43 | |||
44 | n -= PCI_BRIDGE_RESOURCE_NUM; | ||
45 | list_for_each_entry(bus_res, &bus->resources, list) { | ||
46 | if (n-- == 0) | ||
47 | return bus_res->res; | ||
48 | } | ||
49 | return NULL; | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(pci_bus_resource_n); | ||
52 | |||
53 | void pci_bus_remove_resources(struct pci_bus *bus) | ||
54 | { | ||
55 | struct pci_bus_resource *bus_res, *tmp; | ||
56 | int i; | ||
57 | |||
58 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) | ||
59 | bus->resource[i] = NULL; | ||
60 | |||
61 | list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { | ||
62 | list_del(&bus_res->list); | ||
63 | kfree(bus_res); | ||
64 | } | ||
65 | } | ||
66 | |||
20 | /** | 67 | /** |
21 | * pci_bus_alloc_resource - allocate a resource from a parent bus | 68 | * pci_bus_alloc_resource - allocate a resource from a parent bus |
22 | * @bus: PCI bus | 69 | * @bus: PCI bus |
@@ -36,11 +83,14 @@ int | |||
36 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | 83 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, |
37 | resource_size_t size, resource_size_t align, | 84 | resource_size_t size, resource_size_t align, |
38 | resource_size_t min, unsigned int type_mask, | 85 | resource_size_t min, unsigned int type_mask, |
39 | void (*alignf)(void *, struct resource *, resource_size_t, | 86 | resource_size_t (*alignf)(void *, |
40 | resource_size_t), | 87 | const struct resource *, |
88 | resource_size_t, | ||
89 | resource_size_t), | ||
41 | void *alignf_data) | 90 | void *alignf_data) |
42 | { | 91 | { |
43 | int i, ret = -ENOMEM; | 92 | int i, ret = -ENOMEM; |
93 | struct resource *r; | ||
44 | resource_size_t max = -1; | 94 | resource_size_t max = -1; |
45 | 95 | ||
46 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; | 96 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; |
@@ -49,8 +99,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
49 | if (!(res->flags & IORESOURCE_MEM_64)) | 99 | if (!(res->flags & IORESOURCE_MEM_64)) |
50 | max = PCIBIOS_MAX_MEM_32; | 100 | max = PCIBIOS_MAX_MEM_32; |
51 | 101 | ||
52 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 102 | pci_bus_for_each_resource(bus, r, i) { |
53 | struct resource *r = bus->resource[i]; | ||
54 | if (!r) | 103 | if (!r) |
55 | continue; | 104 | continue; |
56 | 105 | ||
@@ -191,6 +240,8 @@ void pci_enable_bridges(struct pci_bus *bus) | |||
191 | if (dev->subordinate) { | 240 | if (dev->subordinate) { |
192 | if (!pci_is_enabled(dev)) { | 241 | if (!pci_is_enabled(dev)) { |
193 | retval = pci_enable_device(dev); | 242 | retval = pci_enable_device(dev); |
243 | if (retval) | ||
244 | dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval); | ||
194 | pci_set_master(dev); | 245 | pci_set_master(dev); |
195 | } | 246 | } |
196 | pci_enable_bridges(dev->subordinate); | 247 | pci_enable_bridges(dev->subordinate); |
@@ -240,14 +291,15 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), | |||
240 | next = dev->bus_list.next; | 291 | next = dev->bus_list.next; |
241 | 292 | ||
242 | /* Run device routines with the device locked */ | 293 | /* Run device routines with the device locked */ |
243 | down(&dev->dev.sem); | 294 | device_lock(&dev->dev); |
244 | retval = cb(dev, userdata); | 295 | retval = cb(dev, userdata); |
245 | up(&dev->dev.sem); | 296 | device_unlock(&dev->dev); |
246 | if (retval) | 297 | if (retval) |
247 | break; | 298 | break; |
248 | } | 299 | } |
249 | up_read(&pci_bus_sem); | 300 | up_read(&pci_bus_sem); |
250 | } | 301 | } |
302 | EXPORT_SYMBOL_GPL(pci_walk_bus); | ||
251 | 303 | ||
252 | EXPORT_SYMBOL(pci_bus_alloc_resource); | 304 | EXPORT_SYMBOL(pci_bus_alloc_resource); |
253 | EXPORT_SYMBOL_GPL(pci_bus_add_device); | 305 | EXPORT_SYMBOL_GPL(pci_bus_add_device); |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 83aae4747594..09933eb9126b 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
36 | #include <linux/tboot.h> | 36 | #include <linux/tboot.h> |
37 | #include <linux/dmi.h> | 37 | #include <linux/dmi.h> |
38 | #include <linux/slab.h> | ||
39 | #include <asm/iommu_table.h> | ||
38 | 40 | ||
39 | #define PREFIX "DMAR: " | 41 | #define PREFIX "DMAR: " |
40 | 42 | ||
@@ -130,9 +132,10 @@ static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, | |||
130 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || | 132 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || |
131 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) | 133 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) |
132 | (*cnt)++; | 134 | (*cnt)++; |
133 | else | 135 | else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) { |
134 | printk(KERN_WARNING PREFIX | 136 | printk(KERN_WARNING PREFIX |
135 | "Unsupported device scope\n"); | 137 | "Unsupported device scope\n"); |
138 | } | ||
136 | start += scope->length; | 139 | start += scope->length; |
137 | } | 140 | } |
138 | if (*cnt == 0) | 141 | if (*cnt == 0) |
@@ -308,6 +311,8 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev) | |||
308 | struct acpi_dmar_atsr *atsr; | 311 | struct acpi_dmar_atsr *atsr; |
309 | struct dmar_atsr_unit *atsru; | 312 | struct dmar_atsr_unit *atsru; |
310 | 313 | ||
314 | dev = pci_physfn(dev); | ||
315 | |||
311 | list_for_each_entry(atsru, &dmar_atsr_units, list) { | 316 | list_for_each_entry(atsru, &dmar_atsr_units, list) { |
312 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | 317 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); |
313 | if (atsr->segment == pci_domain_nr(dev->bus)) | 318 | if (atsr->segment == pci_domain_nr(dev->bus)) |
@@ -357,12 +362,14 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header) | |||
357 | return 0; | 362 | return 0; |
358 | } | 363 | } |
359 | } | 364 | } |
360 | WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" | 365 | WARN_TAINT( |
361 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | 366 | 1, TAINT_FIRMWARE_WORKAROUND, |
362 | drhd->reg_base_addr, | 367 | "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" |
363 | dmi_get_system_info(DMI_BIOS_VENDOR), | 368 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
364 | dmi_get_system_info(DMI_BIOS_VERSION), | 369 | drhd->reg_base_addr, |
365 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | 370 | dmi_get_system_info(DMI_BIOS_VENDOR), |
371 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
372 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
366 | 373 | ||
367 | return 0; | 374 | return 0; |
368 | } | 375 | } |
@@ -506,7 +513,7 @@ parse_dmar_table(void) | |||
506 | return ret; | 513 | return ret; |
507 | } | 514 | } |
508 | 515 | ||
509 | int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | 516 | static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, |
510 | struct pci_dev *dev) | 517 | struct pci_dev *dev) |
511 | { | 518 | { |
512 | int index; | 519 | int index; |
@@ -529,6 +536,8 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev) | |||
529 | struct dmar_drhd_unit *dmaru = NULL; | 536 | struct dmar_drhd_unit *dmaru = NULL; |
530 | struct acpi_dmar_hardware_unit *drhd; | 537 | struct acpi_dmar_hardware_unit *drhd; |
531 | 538 | ||
539 | dev = pci_physfn(dev); | ||
540 | |||
532 | list_for_each_entry(dmaru, &dmar_drhd_units, list) { | 541 | list_for_each_entry(dmaru, &dmar_drhd_units, list) { |
533 | drhd = container_of(dmaru->hdr, | 542 | drhd = container_of(dmaru->hdr, |
534 | struct acpi_dmar_hardware_unit, | 543 | struct acpi_dmar_hardware_unit, |
@@ -613,7 +622,17 @@ int __init dmar_table_init(void) | |||
613 | return 0; | 622 | return 0; |
614 | } | 623 | } |
615 | 624 | ||
616 | static int bios_warned; | 625 | static void warn_invalid_dmar(u64 addr, const char *message) |
626 | { | ||
627 | WARN_TAINT_ONCE( | ||
628 | 1, TAINT_FIRMWARE_WORKAROUND, | ||
629 | "Your BIOS is broken; DMAR reported at address %llx%s!\n" | ||
630 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
631 | addr, message, | ||
632 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
633 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
634 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
635 | } | ||
617 | 636 | ||
618 | int __init check_zero_address(void) | 637 | int __init check_zero_address(void) |
619 | { | 638 | { |
@@ -639,13 +658,7 @@ int __init check_zero_address(void) | |||
639 | 658 | ||
640 | drhd = (void *)entry_header; | 659 | drhd = (void *)entry_header; |
641 | if (!drhd->address) { | 660 | if (!drhd->address) { |
642 | /* Promote an attitude of violence to a BIOS engineer today */ | 661 | warn_invalid_dmar(0, ""); |
643 | WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
644 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
645 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
646 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
647 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
648 | bios_warned = 1; | ||
649 | goto failed; | 662 | goto failed; |
650 | } | 663 | } |
651 | 664 | ||
@@ -658,14 +671,8 @@ int __init check_zero_address(void) | |||
658 | ecap = dmar_readq(addr + DMAR_ECAP_REG); | 671 | ecap = dmar_readq(addr + DMAR_ECAP_REG); |
659 | early_iounmap(addr, VTD_PAGE_SIZE); | 672 | early_iounmap(addr, VTD_PAGE_SIZE); |
660 | if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { | 673 | if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { |
661 | /* Promote an attitude of violence to a BIOS engineer today */ | 674 | warn_invalid_dmar(drhd->address, |
662 | WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | 675 | " returns all ones"); |
663 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
664 | drhd->address, | ||
665 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
666 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
667 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
668 | bios_warned = 1; | ||
669 | goto failed; | 676 | goto failed; |
670 | } | 677 | } |
671 | } | 678 | } |
@@ -681,7 +688,7 @@ failed: | |||
681 | return 0; | 688 | return 0; |
682 | } | 689 | } |
683 | 690 | ||
684 | void __init detect_intel_iommu(void) | 691 | int __init detect_intel_iommu(void) |
685 | { | 692 | { |
686 | int ret; | 693 | int ret; |
687 | 694 | ||
@@ -717,6 +724,8 @@ void __init detect_intel_iommu(void) | |||
717 | } | 724 | } |
718 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); | 725 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); |
719 | dmar_tbl = NULL; | 726 | dmar_tbl = NULL; |
727 | |||
728 | return ret ? 1 : -ENODEV; | ||
720 | } | 729 | } |
721 | 730 | ||
722 | 731 | ||
@@ -730,14 +739,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
730 | int msagaw = 0; | 739 | int msagaw = 0; |
731 | 740 | ||
732 | if (!drhd->reg_base_addr) { | 741 | if (!drhd->reg_base_addr) { |
733 | if (!bios_warned) { | 742 | warn_invalid_dmar(0, ""); |
734 | WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
735 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
736 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
737 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
738 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
739 | bios_warned = 1; | ||
740 | } | ||
741 | return -EINVAL; | 743 | return -EINVAL; |
742 | } | 744 | } |
743 | 745 | ||
@@ -757,16 +759,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
757 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | 759 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); |
758 | 760 | ||
759 | if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { | 761 | if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { |
760 | if (!bios_warned) { | 762 | warn_invalid_dmar(drhd->reg_base_addr, " returns all ones"); |
761 | /* Promote an attitude of violence to a BIOS engineer today */ | ||
762 | WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | ||
763 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
764 | drhd->reg_base_addr, | ||
765 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
766 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
767 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
768 | bios_warned = 1; | ||
769 | } | ||
770 | goto err_unmap; | 763 | goto err_unmap; |
771 | } | 764 | } |
772 | 765 | ||
@@ -805,7 +798,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
805 | } | 798 | } |
806 | 799 | ||
807 | ver = readl(iommu->reg + DMAR_VER_REG); | 800 | ver = readl(iommu->reg + DMAR_VER_REG); |
808 | pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | 801 | pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n", |
802 | iommu->seq_id, | ||
809 | (unsigned long long)drhd->reg_base_addr, | 803 | (unsigned long long)drhd->reg_base_addr, |
810 | DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | 804 | DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), |
811 | (unsigned long long)iommu->cap, | 805 | (unsigned long long)iommu->cap, |
@@ -1230,9 +1224,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | |||
1230 | } | 1224 | } |
1231 | } | 1225 | } |
1232 | 1226 | ||
1233 | void dmar_msi_unmask(unsigned int irq) | 1227 | void dmar_msi_unmask(struct irq_data *data) |
1234 | { | 1228 | { |
1235 | struct intel_iommu *iommu = get_irq_data(irq); | 1229 | struct intel_iommu *iommu = irq_data_get_irq_data(data); |
1236 | unsigned long flag; | 1230 | unsigned long flag; |
1237 | 1231 | ||
1238 | /* unmask it */ | 1232 | /* unmask it */ |
@@ -1243,10 +1237,10 @@ void dmar_msi_unmask(unsigned int irq) | |||
1243 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1237 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
1244 | } | 1238 | } |
1245 | 1239 | ||
1246 | void dmar_msi_mask(unsigned int irq) | 1240 | void dmar_msi_mask(struct irq_data *data) |
1247 | { | 1241 | { |
1248 | unsigned long flag; | 1242 | unsigned long flag; |
1249 | struct intel_iommu *iommu = get_irq_data(irq); | 1243 | struct intel_iommu *iommu = irq_data_get_irq_data(data); |
1250 | 1244 | ||
1251 | /* mask it */ | 1245 | /* mask it */ |
1252 | spin_lock_irqsave(&iommu->register_lock, flag); | 1246 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1423,6 +1417,11 @@ int __init enable_drhd_fault_handling(void) | |||
1423 | (unsigned long long)drhd->reg_base_addr, ret); | 1417 | (unsigned long long)drhd->reg_base_addr, ret); |
1424 | return -1; | 1418 | return -1; |
1425 | } | 1419 | } |
1420 | |||
1421 | /* | ||
1422 | * Clear any previous faults. | ||
1423 | */ | ||
1424 | dmar_fault(iommu->irq, iommu); | ||
1426 | } | 1425 | } |
1427 | 1426 | ||
1428 | return 0; | 1427 | return 0; |
@@ -1456,9 +1455,12 @@ int dmar_reenable_qi(struct intel_iommu *iommu) | |||
1456 | /* | 1455 | /* |
1457 | * Check interrupt remapping support in DMAR table description. | 1456 | * Check interrupt remapping support in DMAR table description. |
1458 | */ | 1457 | */ |
1459 | int dmar_ir_support(void) | 1458 | int __init dmar_ir_support(void) |
1460 | { | 1459 | { |
1461 | struct acpi_table_dmar *dmar; | 1460 | struct acpi_table_dmar *dmar; |
1462 | dmar = (struct acpi_table_dmar *)dmar_tbl; | 1461 | dmar = (struct acpi_table_dmar *)dmar_tbl; |
1462 | if (!dmar) | ||
1463 | return 0; | ||
1463 | return dmar->flags & 0x1; | 1464 | return dmar->flags & 0x1; |
1464 | } | 1465 | } |
1466 | IOMMU_INIT_POST(detect_intel_iommu); | ||
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 3c76fc67cf0e..3bc72d18b121 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/pci_hotplug.h> | 32 | #include <linux/pci_hotplug.h> |
33 | #include <linux/acpi.h> | 33 | #include <linux/acpi.h> |
34 | #include <linux/pci-acpi.h> | 34 | #include <linux/pci-acpi.h> |
35 | #include <linux/slab.h> | ||
35 | 36 | ||
36 | #define MY_NAME "acpi_pcihp" | 37 | #define MY_NAME "acpi_pcihp" |
37 | 38 | ||
@@ -337,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
337 | acpi_handle chandle, handle; | 338 | acpi_handle chandle, handle; |
338 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
339 | 340 | ||
340 | flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | | 341 | flags &= OSC_SHPC_NATIVE_HP_CONTROL; |
341 | OSC_SHPC_NATIVE_HP_CONTROL | | ||
342 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
343 | if (!flags) { | 342 | if (!flags) { |
344 | err("Invalid flags %u specified!\n", flags); | 343 | err("Invalid flags %u specified!\n", flags); |
345 | return -EINVAL; | 344 | return -EINVAL; |
@@ -359,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
359 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 358 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
360 | dbg("Trying to get hotplug control for %s\n", | 359 | dbg("Trying to get hotplug control for %s\n", |
361 | (char *)string.pointer); | 360 | (char *)string.pointer); |
362 | status = acpi_pci_osc_control_set(handle, flags); | 361 | status = acpi_pci_osc_control_set(handle, &flags, flags); |
363 | if (ACPI_SUCCESS(status)) | 362 | if (ACPI_SUCCESS(status)) |
364 | goto got_one; | 363 | goto got_one; |
365 | if (status == AE_SUPPORT) | 364 | if (status == AE_SUPPORT) |
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index bab52047baa8..7722108e78df 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h | |||
@@ -36,7 +36,6 @@ | |||
36 | #define _ACPIPHP_H | 36 | #define _ACPIPHP_H |
37 | 37 | ||
38 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
39 | #include <linux/kobject.h> | ||
40 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
41 | #include <linux/pci_hotplug.h> | 40 | #include <linux/pci_hotplug.h> |
42 | 41 | ||
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 4dd7114964ac..efa9f2de51c1 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
@@ -332,8 +332,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) | |||
332 | slot->hotplug_slot->info->attention_status = 0; | 332 | slot->hotplug_slot->info->attention_status = 0; |
333 | slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); | 333 | slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); |
334 | slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); | 334 | slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); |
335 | slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; | ||
336 | slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; | ||
337 | 335 | ||
338 | acpiphp_slot->slot = slot; | 336 | acpiphp_slot->slot = slot; |
339 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); | 337 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 8e952fdab764..cb23aa2ebf96 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/pci_hotplug.h> | 47 | #include <linux/pci_hotplug.h> |
48 | #include <linux/pci-acpi.h> | 48 | #include <linux/pci-acpi.h> |
49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
50 | #include <linux/slab.h> | ||
50 | 51 | ||
51 | #include "../pci.h" | 52 | #include "../pci.h" |
52 | #include "acpiphp.h" | 53 | #include "acpiphp.h" |
@@ -720,12 +721,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func) | |||
720 | -ret_val); | 721 | -ret_val); |
721 | goto acpiphp_bus_add_out; | 722 | goto acpiphp_bus_add_out; |
722 | } | 723 | } |
723 | /* | ||
724 | * try to start anyway. We could have failed to add | ||
725 | * simply because this bus had previously been added | ||
726 | * on another add. Don't bother with the return value | ||
727 | * we just keep going. | ||
728 | */ | ||
729 | ret_val = acpi_bus_start(device); | 724 | ret_val = acpi_bus_start(device); |
730 | 725 | ||
731 | acpiphp_bus_add_out: | 726 | acpiphp_bus_add_out: |
@@ -755,6 +750,24 @@ static int acpiphp_bus_trim(acpi_handle handle) | |||
755 | return retval; | 750 | return retval; |
756 | } | 751 | } |
757 | 752 | ||
753 | static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) | ||
754 | { | ||
755 | struct acpiphp_func *func; | ||
756 | union acpi_object params[2]; | ||
757 | struct acpi_object_list arg_list; | ||
758 | |||
759 | list_for_each_entry(func, &slot->funcs, sibling) { | ||
760 | arg_list.count = 2; | ||
761 | arg_list.pointer = params; | ||
762 | params[0].type = ACPI_TYPE_INTEGER; | ||
763 | params[0].integer.value = ACPI_ADR_SPACE_PCI_CONFIG; | ||
764 | params[1].type = ACPI_TYPE_INTEGER; | ||
765 | params[1].integer.value = 1; | ||
766 | /* _REG is optional, we don't care about if there is failure */ | ||
767 | acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL); | ||
768 | } | ||
769 | } | ||
770 | |||
758 | /** | 771 | /** |
759 | * enable_device - enable, configure a slot | 772 | * enable_device - enable, configure a slot |
760 | * @slot: slot to be enabled | 773 | * @slot: slot to be enabled |
@@ -811,6 +824,7 @@ static int __ref enable_device(struct acpiphp_slot *slot) | |||
811 | pci_bus_assign_resources(bus); | 824 | pci_bus_assign_resources(bus); |
812 | acpiphp_sanitize_bus(bus); | 825 | acpiphp_sanitize_bus(bus); |
813 | acpiphp_set_hpp_values(bus); | 826 | acpiphp_set_hpp_values(bus); |
827 | acpiphp_set_acpi_region(slot); | ||
814 | pci_enable_bridges(bus); | 828 | pci_enable_bridges(bus); |
815 | pci_bus_add_devices(bus); | 829 | pci_bus_add_devices(bus); |
816 | 830 | ||
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index aa5df485f8cf..e525263210ee 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c | |||
@@ -26,6 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/slab.h> | ||
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
31 | #include <acpi/acpi_bus.h> | 32 | #include <acpi/acpi_bus.h> |
@@ -107,7 +108,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status); | |||
107 | static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status); | 108 | static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status); |
108 | static void ibm_handle_events(acpi_handle handle, u32 event, void *context); | 109 | static void ibm_handle_events(acpi_handle handle, u32 event, void *context); |
109 | static int ibm_get_table_from_acpi(char **bufp); | 110 | static int ibm_get_table_from_acpi(char **bufp); |
110 | static ssize_t ibm_read_apci_table(struct kobject *kobj, | 111 | static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj, |
111 | struct bin_attribute *bin_attr, | 112 | struct bin_attribute *bin_attr, |
112 | char *buffer, loff_t pos, size_t size); | 113 | char *buffer, loff_t pos, size_t size); |
113 | static acpi_status __init ibm_find_acpi_device(acpi_handle handle, | 114 | static acpi_status __init ibm_find_acpi_device(acpi_handle handle, |
@@ -350,6 +351,7 @@ read_table_done: | |||
350 | 351 | ||
351 | /** | 352 | /** |
352 | * ibm_read_apci_table - callback for the sysfs apci_table file | 353 | * ibm_read_apci_table - callback for the sysfs apci_table file |
354 | * @filp: the open sysfs file | ||
353 | * @kobj: the kobject this binary attribute is a part of | 355 | * @kobj: the kobject this binary attribute is a part of |
354 | * @bin_attr: struct bin_attribute for this file | 356 | * @bin_attr: struct bin_attribute for this file |
355 | * @buffer: the kernel space buffer to fill | 357 | * @buffer: the kernel space buffer to fill |
@@ -363,7 +365,7 @@ read_table_done: | |||
363 | * things get really tricky here... | 365 | * things get really tricky here... |
364 | * our solution is to only allow reading the table in all at once. | 366 | * our solution is to only allow reading the table in all at once. |
365 | */ | 367 | */ |
366 | static ssize_t ibm_read_apci_table(struct kobject *kobj, | 368 | static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj, |
367 | struct bin_attribute *bin_attr, | 369 | struct bin_attribute *bin_attr, |
368 | char *buffer, loff_t pos, size_t size) | 370 | char *buffer, loff_t pos, size_t size) |
369 | { | 371 | { |
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index 148fb463b81c..fb3f84661bdc 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c | |||
@@ -162,6 +162,7 @@ static int __init cpcihp_generic_init(void) | |||
162 | dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); | 162 | dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); |
163 | if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { | 163 | if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { |
164 | err("Invalid bridge device %s", bridge); | 164 | err("Invalid bridge device %s", bridge); |
165 | pci_dev_put(dev); | ||
165 | return -EINVAL; | 166 | return -EINVAL; |
166 | } | 167 | } |
167 | bus = dev->subordinate; | 168 | bus = dev->subordinate; |
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 9c6a9fd26812..d8ffc7366801 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h | |||
@@ -310,8 +310,6 @@ struct controller { | |||
310 | u8 first_slot; | 310 | u8 first_slot; |
311 | u8 add_support; | 311 | u8 add_support; |
312 | u8 push_flag; | 312 | u8 push_flag; |
313 | enum pci_bus_speed speed; | ||
314 | enum pci_bus_speed speed_capability; | ||
315 | u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ | 313 | u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ |
316 | u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ | 314 | u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ |
317 | u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */ | 315 | u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */ |
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 075b4f4b6e0d..4952c3b9379d 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c | |||
@@ -583,30 +583,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
585 | 585 | ||
586 | static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
587 | { | ||
588 | struct slot *slot = hotplug_slot->private; | ||
589 | struct controller *ctrl = slot->ctrl; | ||
590 | |||
591 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | ||
592 | |||
593 | *value = ctrl->speed_capability; | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
599 | { | ||
600 | struct slot *slot = hotplug_slot->private; | ||
601 | struct controller *ctrl = slot->ctrl; | ||
602 | |||
603 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | ||
604 | |||
605 | *value = ctrl->speed; | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { | 586 | static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { |
611 | .set_attention_status = set_attention_status, | 587 | .set_attention_status = set_attention_status, |
612 | .enable_slot = process_SI, | 588 | .enable_slot = process_SI, |
@@ -616,8 +592,6 @@ static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { | |||
616 | .get_attention_status = get_attention_status, | 592 | .get_attention_status = get_attention_status, |
617 | .get_latch_status = get_latch_status, | 593 | .get_latch_status = get_latch_status, |
618 | .get_adapter_status = get_adapter_status, | 594 | .get_adapter_status = get_adapter_status, |
619 | .get_max_bus_speed = get_max_bus_speed, | ||
620 | .get_cur_bus_speed = get_cur_bus_speed, | ||
621 | }; | 595 | }; |
622 | 596 | ||
623 | #define SLOT_NAME_SIZE 10 | 597 | #define SLOT_NAME_SIZE 10 |
@@ -629,6 +603,7 @@ static int ctrl_slot_setup(struct controller *ctrl, | |||
629 | struct slot *slot; | 603 | struct slot *slot; |
630 | struct hotplug_slot *hotplug_slot; | 604 | struct hotplug_slot *hotplug_slot; |
631 | struct hotplug_slot_info *hotplug_slot_info; | 605 | struct hotplug_slot_info *hotplug_slot_info; |
606 | struct pci_bus *bus = ctrl->pci_bus; | ||
632 | u8 number_of_slots; | 607 | u8 number_of_slots; |
633 | u8 slot_device; | 608 | u8 slot_device; |
634 | u8 slot_number; | 609 | u8 slot_number; |
@@ -694,7 +669,7 @@ static int ctrl_slot_setup(struct controller *ctrl, | |||
694 | slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; | 669 | slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; |
695 | if (is_slot66mhz(slot)) | 670 | if (is_slot66mhz(slot)) |
696 | slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; | 671 | slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; |
697 | if (ctrl->speed == PCI_SPEED_66MHz) | 672 | if (bus->cur_bus_speed == PCI_SPEED_66MHz) |
698 | slot->capabilities |= PCISLOT_66_MHZ_OPERATION; | 673 | slot->capabilities |= PCISLOT_66_MHZ_OPERATION; |
699 | 674 | ||
700 | ctrl_slot = | 675 | ctrl_slot = |
@@ -844,6 +819,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
844 | u32 rc; | 819 | u32 rc; |
845 | struct controller *ctrl; | 820 | struct controller *ctrl; |
846 | struct pci_func *func; | 821 | struct pci_func *func; |
822 | struct pci_bus *bus; | ||
847 | int err; | 823 | int err; |
848 | 824 | ||
849 | err = pci_enable_device(pdev); | 825 | err = pci_enable_device(pdev); |
@@ -853,6 +829,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
853 | return err; | 829 | return err; |
854 | } | 830 | } |
855 | 831 | ||
832 | bus = pdev->subordinate; | ||
833 | if (!bus) { | ||
834 | dev_notice(&pdev->dev, "the device is not a bridge, " | ||
835 | "skipping\n"); | ||
836 | rc = -ENODEV; | ||
837 | goto err_disable_device; | ||
838 | } | ||
839 | |||
856 | /* Need to read VID early b/c it's used to differentiate CPQ and INTC | 840 | /* Need to read VID early b/c it's used to differentiate CPQ and INTC |
857 | * discovery | 841 | * discovery |
858 | */ | 842 | */ |
@@ -871,7 +855,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
871 | goto err_disable_device; | 855 | goto err_disable_device; |
872 | } | 856 | } |
873 | 857 | ||
874 | /* Check for the proper subsytem ID's | 858 | /* Check for the proper subsystem ID's |
875 | * Intel uses a different SSID programming model than Compaq. | 859 | * Intel uses a different SSID programming model than Compaq. |
876 | * For Intel, each SSID bit identifies a PHP capability. | 860 | * For Intel, each SSID bit identifies a PHP capability. |
877 | * Also Intel HPC's may have RID=0. | 861 | * Also Intel HPC's may have RID=0. |
@@ -929,22 +913,22 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
929 | pci_read_config_byte(pdev, 0x41, &bus_cap); | 913 | pci_read_config_byte(pdev, 0x41, &bus_cap); |
930 | if (bus_cap & 0x80) { | 914 | if (bus_cap & 0x80) { |
931 | dbg("bus max supports 133MHz PCI-X\n"); | 915 | dbg("bus max supports 133MHz PCI-X\n"); |
932 | ctrl->speed_capability = PCI_SPEED_133MHz_PCIX; | 916 | bus->max_bus_speed = PCI_SPEED_133MHz_PCIX; |
933 | break; | 917 | break; |
934 | } | 918 | } |
935 | if (bus_cap & 0x40) { | 919 | if (bus_cap & 0x40) { |
936 | dbg("bus max supports 100MHz PCI-X\n"); | 920 | dbg("bus max supports 100MHz PCI-X\n"); |
937 | ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; | 921 | bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; |
938 | break; | 922 | break; |
939 | } | 923 | } |
940 | if (bus_cap & 20) { | 924 | if (bus_cap & 20) { |
941 | dbg("bus max supports 66MHz PCI-X\n"); | 925 | dbg("bus max supports 66MHz PCI-X\n"); |
942 | ctrl->speed_capability = PCI_SPEED_66MHz_PCIX; | 926 | bus->max_bus_speed = PCI_SPEED_66MHz_PCIX; |
943 | break; | 927 | break; |
944 | } | 928 | } |
945 | if (bus_cap & 10) { | 929 | if (bus_cap & 10) { |
946 | dbg("bus max supports 66MHz PCI\n"); | 930 | dbg("bus max supports 66MHz PCI\n"); |
947 | ctrl->speed_capability = PCI_SPEED_66MHz; | 931 | bus->max_bus_speed = PCI_SPEED_66MHz; |
948 | break; | 932 | break; |
949 | } | 933 | } |
950 | 934 | ||
@@ -955,7 +939,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
955 | case PCI_SUB_HPC_ID: | 939 | case PCI_SUB_HPC_ID: |
956 | /* Original 6500/7000 implementation */ | 940 | /* Original 6500/7000 implementation */ |
957 | ctrl->slot_switch_type = 1; | 941 | ctrl->slot_switch_type = 1; |
958 | ctrl->speed_capability = PCI_SPEED_33MHz; | 942 | bus->max_bus_speed = PCI_SPEED_33MHz; |
959 | ctrl->push_button = 0; | 943 | ctrl->push_button = 0; |
960 | ctrl->pci_config_space = 1; | 944 | ctrl->pci_config_space = 1; |
961 | ctrl->defeature_PHP = 1; | 945 | ctrl->defeature_PHP = 1; |
@@ -966,7 +950,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
966 | /* First Pushbutton implementation */ | 950 | /* First Pushbutton implementation */ |
967 | ctrl->push_flag = 1; | 951 | ctrl->push_flag = 1; |
968 | ctrl->slot_switch_type = 1; | 952 | ctrl->slot_switch_type = 1; |
969 | ctrl->speed_capability = PCI_SPEED_33MHz; | 953 | bus->max_bus_speed = PCI_SPEED_33MHz; |
970 | ctrl->push_button = 1; | 954 | ctrl->push_button = 1; |
971 | ctrl->pci_config_space = 1; | 955 | ctrl->pci_config_space = 1; |
972 | ctrl->defeature_PHP = 1; | 956 | ctrl->defeature_PHP = 1; |
@@ -976,7 +960,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
976 | case PCI_SUB_HPC_ID_INTC: | 960 | case PCI_SUB_HPC_ID_INTC: |
977 | /* Third party (6500/7000) */ | 961 | /* Third party (6500/7000) */ |
978 | ctrl->slot_switch_type = 1; | 962 | ctrl->slot_switch_type = 1; |
979 | ctrl->speed_capability = PCI_SPEED_33MHz; | 963 | bus->max_bus_speed = PCI_SPEED_33MHz; |
980 | ctrl->push_button = 0; | 964 | ctrl->push_button = 0; |
981 | ctrl->pci_config_space = 1; | 965 | ctrl->pci_config_space = 1; |
982 | ctrl->defeature_PHP = 1; | 966 | ctrl->defeature_PHP = 1; |
@@ -987,7 +971,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
987 | /* First 66 Mhz implementation */ | 971 | /* First 66 Mhz implementation */ |
988 | ctrl->push_flag = 1; | 972 | ctrl->push_flag = 1; |
989 | ctrl->slot_switch_type = 1; | 973 | ctrl->slot_switch_type = 1; |
990 | ctrl->speed_capability = PCI_SPEED_66MHz; | 974 | bus->max_bus_speed = PCI_SPEED_66MHz; |
991 | ctrl->push_button = 1; | 975 | ctrl->push_button = 1; |
992 | ctrl->pci_config_space = 1; | 976 | ctrl->pci_config_space = 1; |
993 | ctrl->defeature_PHP = 1; | 977 | ctrl->defeature_PHP = 1; |
@@ -998,7 +982,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
998 | /* First PCI-X implementation, 100MHz */ | 982 | /* First PCI-X implementation, 100MHz */ |
999 | ctrl->push_flag = 1; | 983 | ctrl->push_flag = 1; |
1000 | ctrl->slot_switch_type = 1; | 984 | ctrl->slot_switch_type = 1; |
1001 | ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; | 985 | bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; |
1002 | ctrl->push_button = 1; | 986 | ctrl->push_button = 1; |
1003 | ctrl->pci_config_space = 1; | 987 | ctrl->pci_config_space = 1; |
1004 | ctrl->defeature_PHP = 1; | 988 | ctrl->defeature_PHP = 1; |
@@ -1015,9 +999,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1015 | case PCI_VENDOR_ID_INTEL: | 999 | case PCI_VENDOR_ID_INTEL: |
1016 | /* Check for speed capability (0=33, 1=66) */ | 1000 | /* Check for speed capability (0=33, 1=66) */ |
1017 | if (subsystem_deviceid & 0x0001) | 1001 | if (subsystem_deviceid & 0x0001) |
1018 | ctrl->speed_capability = PCI_SPEED_66MHz; | 1002 | bus->max_bus_speed = PCI_SPEED_66MHz; |
1019 | else | 1003 | else |
1020 | ctrl->speed_capability = PCI_SPEED_33MHz; | 1004 | bus->max_bus_speed = PCI_SPEED_33MHz; |
1021 | 1005 | ||
1022 | /* Check for push button */ | 1006 | /* Check for push button */ |
1023 | if (subsystem_deviceid & 0x0002) | 1007 | if (subsystem_deviceid & 0x0002) |
@@ -1079,7 +1063,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1079 | pdev->bus->number); | 1063 | pdev->bus->number); |
1080 | 1064 | ||
1081 | dbg("Hotplug controller capabilities:\n"); | 1065 | dbg("Hotplug controller capabilities:\n"); |
1082 | dbg(" speed_capability %d\n", ctrl->speed_capability); | 1066 | dbg(" speed_capability %d\n", bus->max_bus_speed); |
1083 | dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? | 1067 | dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? |
1084 | "switch present" : "no switch"); | 1068 | "switch present" : "no switch"); |
1085 | dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? | 1069 | dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? |
@@ -1098,13 +1082,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1098 | 1082 | ||
1099 | /* make our own copy of the pci bus structure, | 1083 | /* make our own copy of the pci bus structure, |
1100 | * as we like tweaking it a lot */ | 1084 | * as we like tweaking it a lot */ |
1101 | ctrl->pci_bus = kmalloc(sizeof(*ctrl->pci_bus), GFP_KERNEL); | 1085 | ctrl->pci_bus = kmemdup(pdev->bus, sizeof(*ctrl->pci_bus), GFP_KERNEL); |
1102 | if (!ctrl->pci_bus) { | 1086 | if (!ctrl->pci_bus) { |
1103 | err("out of memory\n"); | 1087 | err("out of memory\n"); |
1104 | rc = -ENOMEM; | 1088 | rc = -ENOMEM; |
1105 | goto err_free_ctrl; | 1089 | goto err_free_ctrl; |
1106 | } | 1090 | } |
1107 | memcpy(ctrl->pci_bus, pdev->bus, sizeof(*ctrl->pci_bus)); | ||
1108 | 1091 | ||
1109 | ctrl->bus = pdev->bus->number; | 1092 | ctrl->bus = pdev->bus->number; |
1110 | ctrl->rev = pdev->revision; | 1093 | ctrl->rev = pdev->revision; |
@@ -1142,7 +1125,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1142 | } | 1125 | } |
1143 | 1126 | ||
1144 | /* Check for 66Mhz operation */ | 1127 | /* Check for 66Mhz operation */ |
1145 | ctrl->speed = get_controller_speed(ctrl); | 1128 | bus->cur_bus_speed = get_controller_speed(ctrl); |
1146 | 1129 | ||
1147 | 1130 | ||
1148 | /******************************************************** | 1131 | /******************************************************** |
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index 0ff689afa757..e43908d9b5df 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c | |||
@@ -1130,12 +1130,13 @@ static int is_bridge(struct pci_func * func) | |||
1130 | static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) | 1130 | static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) |
1131 | { | 1131 | { |
1132 | struct slot *slot; | 1132 | struct slot *slot; |
1133 | struct pci_bus *bus = ctrl->pci_bus; | ||
1133 | u8 reg; | 1134 | u8 reg; |
1134 | u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); | 1135 | u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); |
1135 | u16 reg16; | 1136 | u16 reg16; |
1136 | u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); | 1137 | u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); |
1137 | 1138 | ||
1138 | if (ctrl->speed == adapter_speed) | 1139 | if (bus->cur_bus_speed == adapter_speed) |
1139 | return 0; | 1140 | return 0; |
1140 | 1141 | ||
1141 | /* We don't allow freq/mode changes if we find another adapter running | 1142 | /* We don't allow freq/mode changes if we find another adapter running |
@@ -1152,7 +1153,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1152 | * lower speed/mode, we allow the new adapter to function at | 1153 | * lower speed/mode, we allow the new adapter to function at |
1153 | * this rate if supported | 1154 | * this rate if supported |
1154 | */ | 1155 | */ |
1155 | if (ctrl->speed < adapter_speed) | 1156 | if (bus->cur_bus_speed < adapter_speed) |
1156 | return 0; | 1157 | return 0; |
1157 | 1158 | ||
1158 | return 1; | 1159 | return 1; |
@@ -1161,20 +1162,20 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1161 | /* If the controller doesn't support freq/mode changes and the | 1162 | /* If the controller doesn't support freq/mode changes and the |
1162 | * controller is running at a higher mode, we bail | 1163 | * controller is running at a higher mode, we bail |
1163 | */ | 1164 | */ |
1164 | if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) | 1165 | if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) |
1165 | return 1; | 1166 | return 1; |
1166 | 1167 | ||
1167 | /* But we allow the adapter to run at a lower rate if possible */ | 1168 | /* But we allow the adapter to run at a lower rate if possible */ |
1168 | if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) | 1169 | if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) |
1169 | return 0; | 1170 | return 0; |
1170 | 1171 | ||
1171 | /* We try to set the max speed supported by both the adapter and | 1172 | /* We try to set the max speed supported by both the adapter and |
1172 | * controller | 1173 | * controller |
1173 | */ | 1174 | */ |
1174 | if (ctrl->speed_capability < adapter_speed) { | 1175 | if (bus->max_bus_speed < adapter_speed) { |
1175 | if (ctrl->speed == ctrl->speed_capability) | 1176 | if (bus->cur_bus_speed == bus->max_bus_speed) |
1176 | return 0; | 1177 | return 0; |
1177 | adapter_speed = ctrl->speed_capability; | 1178 | adapter_speed = bus->max_bus_speed; |
1178 | } | 1179 | } |
1179 | 1180 | ||
1180 | writel(0x0L, ctrl->hpc_reg + LED_CONTROL); | 1181 | writel(0x0L, ctrl->hpc_reg + LED_CONTROL); |
@@ -1229,8 +1230,8 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1229 | pci_write_config_byte(ctrl->pci_dev, 0x43, reg); | 1230 | pci_write_config_byte(ctrl->pci_dev, 0x43, reg); |
1230 | 1231 | ||
1231 | /* Only if mode change...*/ | 1232 | /* Only if mode change...*/ |
1232 | if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || | 1233 | if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || |
1233 | ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) | 1234 | ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) |
1234 | set_SOGO(ctrl); | 1235 | set_SOGO(ctrl); |
1235 | 1236 | ||
1236 | wait_for_ctrl_irq(ctrl); | 1237 | wait_for_ctrl_irq(ctrl); |
@@ -1243,7 +1244,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1243 | set_SOGO(ctrl); | 1244 | set_SOGO(ctrl); |
1244 | wait_for_ctrl_irq(ctrl); | 1245 | wait_for_ctrl_irq(ctrl); |
1245 | 1246 | ||
1246 | ctrl->speed = adapter_speed; | 1247 | bus->cur_bus_speed = adapter_speed; |
1247 | slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); | 1248 | slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); |
1248 | 1249 | ||
1249 | info("Successfully changed frequency/mode for adapter in slot %d\n", | 1250 | info("Successfully changed frequency/mode for adapter in slot %d\n", |
@@ -1269,6 +1270,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1269 | */ | 1270 | */ |
1270 | static u32 board_replaced(struct pci_func *func, struct controller *ctrl) | 1271 | static u32 board_replaced(struct pci_func *func, struct controller *ctrl) |
1271 | { | 1272 | { |
1273 | struct pci_bus *bus = ctrl->pci_bus; | ||
1272 | u8 hp_slot; | 1274 | u8 hp_slot; |
1273 | u8 temp_byte; | 1275 | u8 temp_byte; |
1274 | u8 adapter_speed; | 1276 | u8 adapter_speed; |
@@ -1309,7 +1311,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) | |||
1309 | wait_for_ctrl_irq (ctrl); | 1311 | wait_for_ctrl_irq (ctrl); |
1310 | 1312 | ||
1311 | adapter_speed = get_adapter_speed(ctrl, hp_slot); | 1313 | adapter_speed = get_adapter_speed(ctrl, hp_slot); |
1312 | if (ctrl->speed != adapter_speed) | 1314 | if (bus->cur_bus_speed != adapter_speed) |
1313 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) | 1315 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) |
1314 | rc = WRONG_BUS_FREQUENCY; | 1316 | rc = WRONG_BUS_FREQUENCY; |
1315 | 1317 | ||
@@ -1426,6 +1428,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) | |||
1426 | u32 temp_register = 0xFFFFFFFF; | 1428 | u32 temp_register = 0xFFFFFFFF; |
1427 | u32 rc = 0; | 1429 | u32 rc = 0; |
1428 | struct pci_func *new_slot = NULL; | 1430 | struct pci_func *new_slot = NULL; |
1431 | struct pci_bus *bus = ctrl->pci_bus; | ||
1429 | struct slot *p_slot; | 1432 | struct slot *p_slot; |
1430 | struct resource_lists res_lists; | 1433 | struct resource_lists res_lists; |
1431 | 1434 | ||
@@ -1456,7 +1459,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) | |||
1456 | wait_for_ctrl_irq (ctrl); | 1459 | wait_for_ctrl_irq (ctrl); |
1457 | 1460 | ||
1458 | adapter_speed = get_adapter_speed(ctrl, hp_slot); | 1461 | adapter_speed = get_adapter_speed(ctrl, hp_slot); |
1459 | if (ctrl->speed != adapter_speed) | 1462 | if (bus->cur_bus_speed != adapter_speed) |
1460 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) | 1463 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) |
1461 | rc = WRONG_BUS_FREQUENCY; | 1464 | rc = WRONG_BUS_FREQUENCY; |
1462 | 1465 | ||
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c index e6089bdb6e5b..4cb30447a486 100644 --- a/drivers/pci/hotplug/cpqphp_sysfs.c +++ b/drivers/pci/hotplug/cpqphp_sysfs.c | |||
@@ -28,15 +28,17 @@ | |||
28 | 28 | ||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/slab.h> | ||
31 | #include <linux/types.h> | 32 | #include <linux/types.h> |
32 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
33 | #include <linux/workqueue.h> | 34 | #include <linux/workqueue.h> |
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include <linux/pci_hotplug.h> | 36 | #include <linux/pci_hotplug.h> |
36 | #include <linux/smp_lock.h> | 37 | #include <linux/mutex.h> |
37 | #include <linux/debugfs.h> | 38 | #include <linux/debugfs.h> |
38 | #include "cpqphp.h" | 39 | #include "cpqphp.h" |
39 | 40 | ||
41 | static DEFINE_MUTEX(cpqphp_mutex); | ||
40 | static int show_ctrl (struct controller *ctrl, char *buf) | 42 | static int show_ctrl (struct controller *ctrl, char *buf) |
41 | { | 43 | { |
42 | char *out = buf; | 44 | char *out = buf; |
@@ -146,7 +148,7 @@ static int open(struct inode *inode, struct file *file) | |||
146 | struct ctrl_dbg *dbg; | 148 | struct ctrl_dbg *dbg; |
147 | int retval = -ENOMEM; | 149 | int retval = -ENOMEM; |
148 | 150 | ||
149 | lock_kernel(); | 151 | mutex_lock(&cpqphp_mutex); |
150 | dbg = kmalloc(sizeof(*dbg), GFP_KERNEL); | 152 | dbg = kmalloc(sizeof(*dbg), GFP_KERNEL); |
151 | if (!dbg) | 153 | if (!dbg) |
152 | goto exit; | 154 | goto exit; |
@@ -159,7 +161,7 @@ static int open(struct inode *inode, struct file *file) | |||
159 | file->private_data = dbg; | 161 | file->private_data = dbg; |
160 | retval = 0; | 162 | retval = 0; |
161 | exit: | 163 | exit: |
162 | unlock_kernel(); | 164 | mutex_unlock(&cpqphp_mutex); |
163 | return retval; | 165 | return retval; |
164 | } | 166 | } |
165 | 167 | ||
@@ -168,7 +170,7 @@ static loff_t lseek(struct file *file, loff_t off, int whence) | |||
168 | struct ctrl_dbg *dbg; | 170 | struct ctrl_dbg *dbg; |
169 | loff_t new = -1; | 171 | loff_t new = -1; |
170 | 172 | ||
171 | lock_kernel(); | 173 | mutex_lock(&cpqphp_mutex); |
172 | dbg = file->private_data; | 174 | dbg = file->private_data; |
173 | 175 | ||
174 | switch (whence) { | 176 | switch (whence) { |
@@ -180,10 +182,10 @@ static loff_t lseek(struct file *file, loff_t off, int whence) | |||
180 | break; | 182 | break; |
181 | } | 183 | } |
182 | if (new < 0 || new > dbg->size) { | 184 | if (new < 0 || new > dbg->size) { |
183 | unlock_kernel(); | 185 | mutex_unlock(&cpqphp_mutex); |
184 | return -EINVAL; | 186 | return -EINVAL; |
185 | } | 187 | } |
186 | unlock_kernel(); | 188 | mutex_unlock(&cpqphp_mutex); |
187 | return (file->f_pos = new); | 189 | return (file->f_pos = new); |
188 | } | 190 | } |
189 | 191 | ||
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c index 6151389fd903..17d10e2e8fb6 100644 --- a/drivers/pci/hotplug/fakephp.c +++ b/drivers/pci/hotplug/fakephp.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/slab.h> | ||
22 | #include "../pci.h" | 23 | #include "../pci.h" |
23 | 24 | ||
24 | struct legacy_slot { | 25 | struct legacy_slot { |
@@ -73,7 +74,7 @@ static void legacy_release(struct kobject *kobj) | |||
73 | } | 74 | } |
74 | 75 | ||
75 | static struct kobj_type legacy_ktype = { | 76 | static struct kobj_type legacy_ktype = { |
76 | .sysfs_ops = &(struct sysfs_ops){ | 77 | .sysfs_ops = &(const struct sysfs_ops){ |
77 | .store = legacy_store, .show = legacy_show | 78 | .store = legacy_store, .show = legacy_show |
78 | }, | 79 | }, |
79 | .release = &legacy_release, | 80 | .release = &legacy_release, |
@@ -134,7 +135,7 @@ static int __init init_legacy(void) | |||
134 | struct pci_dev *pdev = NULL; | 135 | struct pci_dev *pdev = NULL; |
135 | 136 | ||
136 | /* Add existing devices */ | 137 | /* Add existing devices */ |
137 | while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) | 138 | for_each_pci_dev(pdev) |
138 | legacy_add_slot(pdev); | 139 | legacy_add_slot(pdev); |
139 | 140 | ||
140 | /* Be alerted of any new ones */ | 141 | /* Be alerted of any new ones */ |
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 7485ffda950c..d934dd4fa873 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -395,89 +395,40 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value) | |||
395 | return rc; | 395 | return rc; |
396 | } | 396 | } |
397 | 397 | ||
398 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 398 | static int get_max_bus_speed(struct slot *slot) |
399 | { | 399 | { |
400 | int rc = -ENODEV; | 400 | int rc; |
401 | struct slot *pslot; | ||
402 | u8 mode = 0; | 401 | u8 mode = 0; |
402 | enum pci_bus_speed speed; | ||
403 | struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; | ||
403 | 404 | ||
404 | debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, | 405 | debug("%s - Entry slot[%p]\n", __func__, slot); |
405 | hotplug_slot, value); | ||
406 | 406 | ||
407 | ibmphp_lock_operations(); | 407 | ibmphp_lock_operations(); |
408 | 408 | mode = slot->supported_bus_mode; | |
409 | if (hotplug_slot) { | 409 | speed = slot->supported_speed; |
410 | pslot = hotplug_slot->private; | ||
411 | if (pslot) { | ||
412 | rc = 0; | ||
413 | mode = pslot->supported_bus_mode; | ||
414 | *value = pslot->supported_speed; | ||
415 | switch (*value) { | ||
416 | case BUS_SPEED_33: | ||
417 | break; | ||
418 | case BUS_SPEED_66: | ||
419 | if (mode == BUS_MODE_PCIX) | ||
420 | *value += 0x01; | ||
421 | break; | ||
422 | case BUS_SPEED_100: | ||
423 | case BUS_SPEED_133: | ||
424 | *value = pslot->supported_speed + 0x01; | ||
425 | break; | ||
426 | default: | ||
427 | /* Note (will need to change): there would be soon 256, 512 also */ | ||
428 | rc = -ENODEV; | ||
429 | } | ||
430 | } | ||
431 | } | ||
432 | |||
433 | ibmphp_unlock_operations(); | 410 | ibmphp_unlock_operations(); |
434 | debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); | ||
435 | return rc; | ||
436 | } | ||
437 | 411 | ||
438 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 412 | switch (speed) { |
439 | { | 413 | case BUS_SPEED_33: |
440 | int rc = -ENODEV; | 414 | break; |
441 | struct slot *pslot; | 415 | case BUS_SPEED_66: |
442 | u8 mode = 0; | 416 | if (mode == BUS_MODE_PCIX) |
443 | 417 | speed += 0x01; | |
444 | debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, | 418 | break; |
445 | hotplug_slot, value); | 419 | case BUS_SPEED_100: |
446 | 420 | case BUS_SPEED_133: | |
447 | ibmphp_lock_operations(); | 421 | speed += 0x01; |
448 | 422 | break; | |
449 | if (hotplug_slot) { | 423 | default: |
450 | pslot = hotplug_slot->private; | 424 | /* Note (will need to change): there would be soon 256, 512 also */ |
451 | if (pslot) { | 425 | rc = -ENODEV; |
452 | rc = get_cur_bus_info(&pslot); | ||
453 | if (!rc) { | ||
454 | mode = pslot->bus_on->current_bus_mode; | ||
455 | *value = pslot->bus_on->current_speed; | ||
456 | switch (*value) { | ||
457 | case BUS_SPEED_33: | ||
458 | break; | ||
459 | case BUS_SPEED_66: | ||
460 | if (mode == BUS_MODE_PCIX) | ||
461 | *value += 0x01; | ||
462 | else if (mode == BUS_MODE_PCI) | ||
463 | ; | ||
464 | else | ||
465 | *value = PCI_SPEED_UNKNOWN; | ||
466 | break; | ||
467 | case BUS_SPEED_100: | ||
468 | case BUS_SPEED_133: | ||
469 | *value += 0x01; | ||
470 | break; | ||
471 | default: | ||
472 | /* Note of change: there would also be 256, 512 soon */ | ||
473 | rc = -ENODEV; | ||
474 | } | ||
475 | } | ||
476 | } | ||
477 | } | 426 | } |
478 | 427 | ||
479 | ibmphp_unlock_operations(); | 428 | if (!rc) |
480 | debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); | 429 | bus->max_bus_speed = speed; |
430 | |||
431 | debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); | ||
481 | return rc; | 432 | return rc; |
482 | } | 433 | } |
483 | 434 | ||
@@ -572,6 +523,7 @@ static int __init init_ops(void) | |||
572 | if (slot_cur->bus_on->current_speed == 0xFF) | 523 | if (slot_cur->bus_on->current_speed == 0xFF) |
573 | if (get_cur_bus_info(&slot_cur)) | 524 | if (get_cur_bus_info(&slot_cur)) |
574 | return -1; | 525 | return -1; |
526 | get_max_bus_speed(slot_cur); | ||
575 | 527 | ||
576 | if (slot_cur->ctrl->options == 0xFF) | 528 | if (slot_cur->ctrl->options == 0xFF) |
577 | if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) | 529 | if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) |
@@ -655,6 +607,7 @@ static int validate(struct slot *slot_cur, int opn) | |||
655 | int ibmphp_update_slot_info(struct slot *slot_cur) | 607 | int ibmphp_update_slot_info(struct slot *slot_cur) |
656 | { | 608 | { |
657 | struct hotplug_slot_info *info; | 609 | struct hotplug_slot_info *info; |
610 | struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus; | ||
658 | int rc; | 611 | int rc; |
659 | u8 bus_speed; | 612 | u8 bus_speed; |
660 | u8 mode; | 613 | u8 mode; |
@@ -700,8 +653,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) | |||
700 | bus_speed = PCI_SPEED_UNKNOWN; | 653 | bus_speed = PCI_SPEED_UNKNOWN; |
701 | } | 654 | } |
702 | 655 | ||
703 | info->cur_bus_speed = bus_speed; | 656 | bus->cur_bus_speed = bus_speed; |
704 | info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed; | ||
705 | // To do: bus_names | 657 | // To do: bus_names |
706 | 658 | ||
707 | rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); | 659 | rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); |
@@ -1326,8 +1278,6 @@ struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { | |||
1326 | .get_attention_status = get_attention_status, | 1278 | .get_attention_status = get_attention_status, |
1327 | .get_latch_status = get_latch_status, | 1279 | .get_latch_status = get_latch_status, |
1328 | .get_adapter_status = get_adapter_present, | 1280 | .get_adapter_status = get_adapter_present, |
1329 | .get_max_bus_speed = get_max_bus_speed, | ||
1330 | .get_cur_bus_speed = get_cur_bus_speed, | ||
1331 | /* .get_max_adapter_speed = get_max_adapter_speed, | 1281 | /* .get_max_adapter_speed = get_max_adapter_speed, |
1332 | .get_bus_name_status = get_bus_name, | 1282 | .get_bus_name_status = get_bus_name, |
1333 | */ | 1283 | */ |
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index c1abac8ab5c3..2850e64dedae 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -245,7 +245,7 @@ static void __init print_ebda_hpc (void) | |||
245 | 245 | ||
246 | int __init ibmphp_access_ebda (void) | 246 | int __init ibmphp_access_ebda (void) |
247 | { | 247 | { |
248 | u8 format, num_ctlrs, rio_complete, hs_complete; | 248 | u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz; |
249 | u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; | 249 | u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; |
250 | int rc = 0; | 250 | int rc = 0; |
251 | 251 | ||
@@ -260,13 +260,28 @@ int __init ibmphp_access_ebda (void) | |||
260 | iounmap (io_mem); | 260 | iounmap (io_mem); |
261 | debug ("returned ebda segment: %x\n", ebda_seg); | 261 | debug ("returned ebda segment: %x\n", ebda_seg); |
262 | 262 | ||
263 | io_mem = ioremap(ebda_seg<<4, 1024); | 263 | io_mem = ioremap(ebda_seg<<4, 1); |
264 | if (!io_mem) | ||
265 | return -ENOMEM; | ||
266 | ebda_sz = readb(io_mem); | ||
267 | iounmap(io_mem); | ||
268 | debug("ebda size: %d(KiB)\n", ebda_sz); | ||
269 | if (ebda_sz == 0) | ||
270 | return -ENOMEM; | ||
271 | |||
272 | io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024)); | ||
264 | if (!io_mem ) | 273 | if (!io_mem ) |
265 | return -ENOMEM; | 274 | return -ENOMEM; |
266 | next_offset = 0x180; | 275 | next_offset = 0x180; |
267 | 276 | ||
268 | for (;;) { | 277 | for (;;) { |
269 | offset = next_offset; | 278 | offset = next_offset; |
279 | |||
280 | /* Make sure what we read is still in the mapped section */ | ||
281 | if (WARN(offset > (ebda_sz * 1024 - 4), | ||
282 | "ibmphp_ebda: next read is beyond ebda_sz\n")) | ||
283 | break; | ||
284 | |||
270 | next_offset = readw (io_mem + offset); /* offset of next blk */ | 285 | next_offset = readw (io_mem + offset); /* offset of next blk */ |
271 | 286 | ||
272 | offset += 2; | 287 | offset += 2; |
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index c7084f0eca5a..f59ed30512b5 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/semaphore.h> | ||
38 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
39 | #include "ibmphp.h" | 40 | #include "ibmphp.h" |
40 | 41 | ||
@@ -132,8 +133,8 @@ void __init ibmphp_hpc_initvars (void) | |||
132 | debug ("%s - Entry\n", __func__); | 133 | debug ("%s - Entry\n", __func__); |
133 | 134 | ||
134 | mutex_init(&sem_hpcaccess); | 135 | mutex_init(&sem_hpcaccess); |
135 | init_MUTEX (&semOperations); | 136 | sema_init(&semOperations, 1); |
136 | init_MUTEX_LOCKED (&sem_exit); | 137 | sema_init(&sem_exit, 0); |
137 | to_debug = 0; | 138 | to_debug = 0; |
138 | 139 | ||
139 | debug ("%s - Exit\n", __func__); | 140 | debug ("%s - Exit\n", __func__); |
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index ec73294d1fa6..e2dc289f767c 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c | |||
@@ -40,7 +40,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno); | |||
40 | static int once_over (void); | 40 | static int once_over (void); |
41 | static int remove_ranges (struct bus_node *, struct bus_node *); | 41 | static int remove_ranges (struct bus_node *, struct bus_node *); |
42 | static int update_bridge_ranges (struct bus_node **); | 42 | static int update_bridge_ranges (struct bus_node **); |
43 | static int add_range (int type, struct range_node *, struct bus_node *); | 43 | static int add_bus_range (int type, struct range_node *, struct bus_node *); |
44 | static void fix_resources (struct bus_node *); | 44 | static void fix_resources (struct bus_node *); |
45 | static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8); | 45 | static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8); |
46 | 46 | ||
@@ -133,7 +133,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node | |||
133 | newrange->rangeno = 1; | 133 | newrange->rangeno = 1; |
134 | else { | 134 | else { |
135 | /* need to insert our range */ | 135 | /* need to insert our range */ |
136 | add_range (flag, newrange, newbus); | 136 | add_bus_range (flag, newrange, newbus); |
137 | debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); | 137 | debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); |
138 | } | 138 | } |
139 | 139 | ||
@@ -384,7 +384,7 @@ int __init ibmphp_rsrc_init (void) | |||
384 | * Input: type of the resource, range to add, current bus | 384 | * Input: type of the resource, range to add, current bus |
385 | * Output: 0 or -1, bus and range ptrs | 385 | * Output: 0 or -1, bus and range ptrs |
386 | ********************************************************************************/ | 386 | ********************************************************************************/ |
387 | static int add_range (int type, struct range_node *range, struct bus_node *bus_cur) | 387 | static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur) |
388 | { | 388 | { |
389 | struct range_node *range_cur = NULL; | 389 | struct range_node *range_cur = NULL; |
390 | struct range_node *range_prev; | 390 | struct range_node *range_prev; |
@@ -455,7 +455,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c | |||
455 | 455 | ||
456 | /******************************************************************************* | 456 | /******************************************************************************* |
457 | * This routine goes through the list of resources of type 'type' and updates | 457 | * This routine goes through the list of resources of type 'type' and updates |
458 | * the range numbers that they correspond to. It was called from add_range fnc | 458 | * the range numbers that they correspond to. It was called from add_bus_range fnc |
459 | * | 459 | * |
460 | * Input: bus, type of the resource, the rangeno starting from which to update | 460 | * Input: bus, type of the resource, the rangeno starting from which to update |
461 | ******************************************************************************/ | 461 | ******************************************************************************/ |
@@ -1999,7 +1999,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) | |||
1999 | 1999 | ||
2000 | if (bus_sec->noIORanges > 0) { | 2000 | if (bus_sec->noIORanges > 0) { |
2001 | if (!range_exists_already (range, bus_sec, IO)) { | 2001 | if (!range_exists_already (range, bus_sec, IO)) { |
2002 | add_range (IO, range, bus_sec); | 2002 | add_bus_range (IO, range, bus_sec); |
2003 | ++bus_sec->noIORanges; | 2003 | ++bus_sec->noIORanges; |
2004 | } else { | 2004 | } else { |
2005 | kfree (range); | 2005 | kfree (range); |
@@ -2048,7 +2048,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) | |||
2048 | 2048 | ||
2049 | if (bus_sec->noMemRanges > 0) { | 2049 | if (bus_sec->noMemRanges > 0) { |
2050 | if (!range_exists_already (range, bus_sec, MEM)) { | 2050 | if (!range_exists_already (range, bus_sec, MEM)) { |
2051 | add_range (MEM, range, bus_sec); | 2051 | add_bus_range (MEM, range, bus_sec); |
2052 | ++bus_sec->noMemRanges; | 2052 | ++bus_sec->noMemRanges; |
2053 | } else { | 2053 | } else { |
2054 | kfree (range); | 2054 | kfree (range); |
@@ -2102,7 +2102,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) | |||
2102 | 2102 | ||
2103 | if (bus_sec->noPFMemRanges > 0) { | 2103 | if (bus_sec->noPFMemRanges > 0) { |
2104 | if (!range_exists_already (range, bus_sec, PFMEM)) { | 2104 | if (!range_exists_already (range, bus_sec, PFMEM)) { |
2105 | add_range (PFMEM, range, bus_sec); | 2105 | add_bus_range (PFMEM, range, bus_sec); |
2106 | ++bus_sec->noPFMemRanges; | 2106 | ++bus_sec->noPFMemRanges; |
2107 | } else { | 2107 | } else { |
2108 | kfree (range); | 2108 | kfree (range); |
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 38183a534b65..6d2eea93298f 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/kobject.h> | 33 | #include <linux/kobject.h> |
34 | #include <linux/sysfs.h> | 34 | #include <linux/sysfs.h> |
35 | #include <linux/pagemap.h> | 35 | #include <linux/pagemap.h> |
36 | #include <linux/slab.h> | ||
37 | #include <linux/init.h> | 36 | #include <linux/init.h> |
38 | #include <linux/mount.h> | 37 | #include <linux/mount.h> |
39 | #include <linux/namei.h> | 38 | #include <linux/namei.h> |
@@ -64,32 +63,6 @@ static int debug; | |||
64 | static LIST_HEAD(pci_hotplug_slot_list); | 63 | static LIST_HEAD(pci_hotplug_slot_list); |
65 | static DEFINE_MUTEX(pci_hp_mutex); | 64 | static DEFINE_MUTEX(pci_hp_mutex); |
66 | 65 | ||
67 | /* these strings match up with the values in pci_bus_speed */ | ||
68 | static char *pci_bus_speed_strings[] = { | ||
69 | "33 MHz PCI", /* 0x00 */ | ||
70 | "66 MHz PCI", /* 0x01 */ | ||
71 | "66 MHz PCI-X", /* 0x02 */ | ||
72 | "100 MHz PCI-X", /* 0x03 */ | ||
73 | "133 MHz PCI-X", /* 0x04 */ | ||
74 | NULL, /* 0x05 */ | ||
75 | NULL, /* 0x06 */ | ||
76 | NULL, /* 0x07 */ | ||
77 | NULL, /* 0x08 */ | ||
78 | "66 MHz PCI-X 266", /* 0x09 */ | ||
79 | "100 MHz PCI-X 266", /* 0x0a */ | ||
80 | "133 MHz PCI-X 266", /* 0x0b */ | ||
81 | NULL, /* 0x0c */ | ||
82 | NULL, /* 0x0d */ | ||
83 | NULL, /* 0x0e */ | ||
84 | NULL, /* 0x0f */ | ||
85 | NULL, /* 0x10 */ | ||
86 | "66 MHz PCI-X 533", /* 0x11 */ | ||
87 | "100 MHz PCI-X 533", /* 0x12 */ | ||
88 | "133 MHz PCI-X 533", /* 0x13 */ | ||
89 | "2.5 GT/s PCIe", /* 0x14 */ | ||
90 | "5.0 GT/s PCIe", /* 0x15 */ | ||
91 | }; | ||
92 | |||
93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI | 66 | #ifdef CONFIG_HOTPLUG_PCI_CPCI |
94 | extern int cpci_hotplug_init(int debug); | 67 | extern int cpci_hotplug_init(int debug); |
95 | extern void cpci_hotplug_exit(void); | 68 | extern void cpci_hotplug_exit(void); |
@@ -118,8 +91,6 @@ GET_STATUS(power_status, u8) | |||
118 | GET_STATUS(attention_status, u8) | 91 | GET_STATUS(attention_status, u8) |
119 | GET_STATUS(latch_status, u8) | 92 | GET_STATUS(latch_status, u8) |
120 | GET_STATUS(adapter_status, u8) | 93 | GET_STATUS(adapter_status, u8) |
121 | GET_STATUS(max_bus_speed, enum pci_bus_speed) | ||
122 | GET_STATUS(cur_bus_speed, enum pci_bus_speed) | ||
123 | 94 | ||
124 | static ssize_t power_read_file(struct pci_slot *slot, char *buf) | 95 | static ssize_t power_read_file(struct pci_slot *slot, char *buf) |
125 | { | 96 | { |
@@ -263,60 +234,6 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = { | |||
263 | .show = presence_read_file, | 234 | .show = presence_read_file, |
264 | }; | 235 | }; |
265 | 236 | ||
266 | static char *unknown_speed = "Unknown bus speed"; | ||
267 | |||
268 | static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf) | ||
269 | { | ||
270 | char *speed_string; | ||
271 | int retval; | ||
272 | enum pci_bus_speed value; | ||
273 | |||
274 | retval = get_max_bus_speed(slot->hotplug, &value); | ||
275 | if (retval) | ||
276 | goto exit; | ||
277 | |||
278 | if (value == PCI_SPEED_UNKNOWN) | ||
279 | speed_string = unknown_speed; | ||
280 | else | ||
281 | speed_string = pci_bus_speed_strings[value]; | ||
282 | |||
283 | retval = sprintf (buf, "%s\n", speed_string); | ||
284 | |||
285 | exit: | ||
286 | return retval; | ||
287 | } | ||
288 | |||
289 | static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = { | ||
290 | .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, | ||
291 | .show = max_bus_speed_read_file, | ||
292 | }; | ||
293 | |||
294 | static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf) | ||
295 | { | ||
296 | char *speed_string; | ||
297 | int retval; | ||
298 | enum pci_bus_speed value; | ||
299 | |||
300 | retval = get_cur_bus_speed(slot->hotplug, &value); | ||
301 | if (retval) | ||
302 | goto exit; | ||
303 | |||
304 | if (value == PCI_SPEED_UNKNOWN) | ||
305 | speed_string = unknown_speed; | ||
306 | else | ||
307 | speed_string = pci_bus_speed_strings[value]; | ||
308 | |||
309 | retval = sprintf (buf, "%s\n", speed_string); | ||
310 | |||
311 | exit: | ||
312 | return retval; | ||
313 | } | ||
314 | |||
315 | static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = { | ||
316 | .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, | ||
317 | .show = cur_bus_speed_read_file, | ||
318 | }; | ||
319 | |||
320 | static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, | 237 | static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, |
321 | size_t count) | 238 | size_t count) |
322 | { | 239 | { |
@@ -391,26 +308,6 @@ static bool has_adapter_file(struct pci_slot *pci_slot) | |||
391 | return false; | 308 | return false; |
392 | } | 309 | } |
393 | 310 | ||
394 | static bool has_max_bus_speed_file(struct pci_slot *pci_slot) | ||
395 | { | ||
396 | struct hotplug_slot *slot = pci_slot->hotplug; | ||
397 | if ((!slot) || (!slot->ops)) | ||
398 | return false; | ||
399 | if (slot->ops->get_max_bus_speed) | ||
400 | return true; | ||
401 | return false; | ||
402 | } | ||
403 | |||
404 | static bool has_cur_bus_speed_file(struct pci_slot *pci_slot) | ||
405 | { | ||
406 | struct hotplug_slot *slot = pci_slot->hotplug; | ||
407 | if ((!slot) || (!slot->ops)) | ||
408 | return false; | ||
409 | if (slot->ops->get_cur_bus_speed) | ||
410 | return true; | ||
411 | return false; | ||
412 | } | ||
413 | |||
414 | static bool has_test_file(struct pci_slot *pci_slot) | 311 | static bool has_test_file(struct pci_slot *pci_slot) |
415 | { | 312 | { |
416 | struct hotplug_slot *slot = pci_slot->hotplug; | 313 | struct hotplug_slot *slot = pci_slot->hotplug; |
@@ -456,20 +353,6 @@ static int fs_add_slot(struct pci_slot *slot) | |||
456 | goto exit_adapter; | 353 | goto exit_adapter; |
457 | } | 354 | } |
458 | 355 | ||
459 | if (has_max_bus_speed_file(slot)) { | ||
460 | retval = sysfs_create_file(&slot->kobj, | ||
461 | &hotplug_slot_attr_max_bus_speed.attr); | ||
462 | if (retval) | ||
463 | goto exit_max_speed; | ||
464 | } | ||
465 | |||
466 | if (has_cur_bus_speed_file(slot)) { | ||
467 | retval = sysfs_create_file(&slot->kobj, | ||
468 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
469 | if (retval) | ||
470 | goto exit_cur_speed; | ||
471 | } | ||
472 | |||
473 | if (has_test_file(slot)) { | 356 | if (has_test_file(slot)) { |
474 | retval = sysfs_create_file(&slot->kobj, | 357 | retval = sysfs_create_file(&slot->kobj, |
475 | &hotplug_slot_attr_test.attr); | 358 | &hotplug_slot_attr_test.attr); |
@@ -480,14 +363,6 @@ static int fs_add_slot(struct pci_slot *slot) | |||
480 | goto exit; | 363 | goto exit; |
481 | 364 | ||
482 | exit_test: | 365 | exit_test: |
483 | if (has_cur_bus_speed_file(slot)) | ||
484 | sysfs_remove_file(&slot->kobj, | ||
485 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
486 | exit_cur_speed: | ||
487 | if (has_max_bus_speed_file(slot)) | ||
488 | sysfs_remove_file(&slot->kobj, | ||
489 | &hotplug_slot_attr_max_bus_speed.attr); | ||
490 | exit_max_speed: | ||
491 | if (has_adapter_file(slot)) | 366 | if (has_adapter_file(slot)) |
492 | sysfs_remove_file(&slot->kobj, | 367 | sysfs_remove_file(&slot->kobj, |
493 | &hotplug_slot_attr_presence.attr); | 368 | &hotplug_slot_attr_presence.attr); |
@@ -523,14 +398,6 @@ static void fs_remove_slot(struct pci_slot *slot) | |||
523 | sysfs_remove_file(&slot->kobj, | 398 | sysfs_remove_file(&slot->kobj, |
524 | &hotplug_slot_attr_presence.attr); | 399 | &hotplug_slot_attr_presence.attr); |
525 | 400 | ||
526 | if (has_max_bus_speed_file(slot)) | ||
527 | sysfs_remove_file(&slot->kobj, | ||
528 | &hotplug_slot_attr_max_bus_speed.attr); | ||
529 | |||
530 | if (has_cur_bus_speed_file(slot)) | ||
531 | sysfs_remove_file(&slot->kobj, | ||
532 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
533 | |||
534 | if (has_test_file(slot)) | 401 | if (has_test_file(slot)) |
535 | sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); | 402 | sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); |
536 | 403 | ||
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 4ed76b47b6dc..838f571027b7 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/sched.h> /* signal_pending() */ | 36 | #include <linux/sched.h> /* signal_pending() */ |
37 | #include <linux/pcieport_if.h> | 37 | #include <linux/pcieport_if.h> |
38 | #include <linux/mutex.h> | 38 | #include <linux/mutex.h> |
39 | #include <linux/workqueue.h> | ||
39 | 40 | ||
40 | #define MY_NAME "pciehp" | 41 | #define MY_NAME "pciehp" |
41 | 42 | ||
@@ -44,6 +45,7 @@ extern int pciehp_poll_time; | |||
44 | extern int pciehp_debug; | 45 | extern int pciehp_debug; |
45 | extern int pciehp_force; | 46 | extern int pciehp_force; |
46 | extern struct workqueue_struct *pciehp_wq; | 47 | extern struct workqueue_struct *pciehp_wq; |
48 | extern struct workqueue_struct *pciehp_ordered_wq; | ||
47 | 49 | ||
48 | #define dbg(format, arg...) \ | 50 | #define dbg(format, arg...) \ |
49 | do { \ | 51 | do { \ |
@@ -176,19 +178,11 @@ static inline void pciehp_firmware_init(void) | |||
176 | { | 178 | { |
177 | pciehp_acpi_slot_detection_init(); | 179 | pciehp_acpi_slot_detection_init(); |
178 | } | 180 | } |
179 | |||
180 | static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) | ||
181 | { | ||
182 | int retval; | ||
183 | u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | | ||
184 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
185 | retval = acpi_get_hp_hw_control_from_firmware(dev, flags); | ||
186 | if (retval) | ||
187 | return retval; | ||
188 | return pciehp_acpi_slot_detection_check(dev); | ||
189 | } | ||
190 | #else | 181 | #else |
191 | #define pciehp_firmware_init() do {} while (0) | 182 | #define pciehp_firmware_init() do {} while (0) |
192 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 | 183 | static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev) |
184 | { | ||
185 | return 0; | ||
186 | } | ||
193 | #endif /* CONFIG_ACPI */ | 187 | #endif /* CONFIG_ACPI */ |
194 | #endif /* _PCIEHP_H */ | 188 | #endif /* _PCIEHP_H */ |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index b09b083011d6..5f7226223a62 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/acpi.h> | 26 | #include <linux/acpi.h> |
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/pci_hotplug.h> | 28 | #include <linux/pci_hotplug.h> |
29 | #include <linux/slab.h> | ||
29 | #include "pciehp.h" | 30 | #include "pciehp.h" |
30 | 31 | ||
31 | #define PCIEHP_DETECT_PCIE (0) | 32 | #define PCIEHP_DETECT_PCIE (0) |
@@ -84,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
84 | acpi_handle handle; | 85 | acpi_handle handle; |
85 | struct dummy_slot *slot, *tmp; | 86 | struct dummy_slot *slot, *tmp; |
86 | struct pci_dev *pdev = dev->port; | 87 | struct pci_dev *pdev = dev->port; |
87 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 88 | |
88 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | ||
89 | return -ENODEV; | ||
90 | pos = pci_pcie_cap(pdev); | 89 | pos = pci_pcie_cap(pdev); |
91 | if (!pos) | 90 | if (!pos) |
92 | return -ENODEV; | 91 | return -ENODEV; |
@@ -116,7 +115,8 @@ static struct pcie_port_service_driver __initdata dummy_driver = { | |||
116 | static int __init select_detection_mode(void) | 115 | static int __init select_detection_mode(void) |
117 | { | 116 | { |
118 | struct dummy_slot *slot, *tmp; | 117 | struct dummy_slot *slot, *tmp; |
119 | pcie_port_service_register(&dummy_driver); | 118 | if (pcie_port_service_register(&dummy_driver)) |
119 | return PCIEHP_DETECT_ACPI; | ||
120 | pcie_port_service_unregister(&dummy_driver); | 120 | pcie_port_service_unregister(&dummy_driver); |
121 | list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { | 121 | list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { |
122 | list_del(&slot->list); | 122 | list_del(&slot->list); |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 5674b2075bdc..7ac8358df8fd 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/moduleparam.h> | 31 | #include <linux/moduleparam.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/slab.h> | ||
33 | #include <linux/types.h> | 34 | #include <linux/types.h> |
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include "pciehp.h" | 36 | #include "pciehp.h" |
@@ -42,6 +43,7 @@ int pciehp_poll_mode; | |||
42 | int pciehp_poll_time; | 43 | int pciehp_poll_time; |
43 | int pciehp_force; | 44 | int pciehp_force; |
44 | struct workqueue_struct *pciehp_wq; | 45 | struct workqueue_struct *pciehp_wq; |
46 | struct workqueue_struct *pciehp_ordered_wq; | ||
45 | 47 | ||
46 | #define DRIVER_VERSION "0.4" | 48 | #define DRIVER_VERSION "0.4" |
47 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 49 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
@@ -58,7 +60,7 @@ module_param(pciehp_force, bool, 0644); | |||
58 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); | 60 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); |
59 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); | 61 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); |
60 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); | 62 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); |
61 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); | 63 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing"); |
62 | 64 | ||
63 | #define PCIE_MODULE_NAME "pciehp" | 65 | #define PCIE_MODULE_NAME "pciehp" |
64 | 66 | ||
@@ -69,8 +71,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); | |||
69 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); | 71 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); |
70 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); | 72 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); |
71 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | 73 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); |
72 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
73 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
74 | 74 | ||
75 | /** | 75 | /** |
76 | * release_slot - free up the memory used by a slot | 76 | * release_slot - free up the memory used by a slot |
@@ -113,8 +113,6 @@ static int init_slot(struct controller *ctrl) | |||
113 | ops->disable_slot = disable_slot; | 113 | ops->disable_slot = disable_slot; |
114 | ops->get_power_status = get_power_status; | 114 | ops->get_power_status = get_power_status; |
115 | ops->get_adapter_status = get_adapter_status; | 115 | ops->get_adapter_status = get_adapter_status; |
116 | ops->get_max_bus_speed = get_max_bus_speed; | ||
117 | ops->get_cur_bus_speed = get_cur_bus_speed; | ||
118 | if (MRL_SENS(ctrl)) | 116 | if (MRL_SENS(ctrl)) |
119 | ops->get_latch_status = get_latch_status; | 117 | ops->get_latch_status = get_latch_status; |
120 | if (ATTN_LED(ctrl)) { | 118 | if (ATTN_LED(ctrl)) { |
@@ -227,27 +225,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
227 | return pciehp_get_adapter_status(slot, value); | 225 | return pciehp_get_adapter_status(slot, value); |
228 | } | 226 | } |
229 | 227 | ||
230 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | ||
231 | enum pci_bus_speed *value) | ||
232 | { | ||
233 | struct slot *slot = hotplug_slot->private; | ||
234 | |||
235 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
236 | __func__, slot_name(slot)); | ||
237 | |||
238 | return pciehp_get_max_link_speed(slot, value); | ||
239 | } | ||
240 | |||
241 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
242 | { | ||
243 | struct slot *slot = hotplug_slot->private; | ||
244 | |||
245 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
246 | __func__, slot_name(slot)); | ||
247 | |||
248 | return pciehp_get_cur_link_speed(slot, value); | ||
249 | } | ||
250 | |||
251 | static int pciehp_probe(struct pcie_device *dev) | 228 | static int pciehp_probe(struct pcie_device *dev) |
252 | { | 229 | { |
253 | int rc; | 230 | int rc; |
@@ -259,7 +236,7 @@ static int pciehp_probe(struct pcie_device *dev) | |||
259 | dev_info(&dev->device, | 236 | dev_info(&dev->device, |
260 | "Bypassing BIOS check for pciehp use on %s\n", | 237 | "Bypassing BIOS check for pciehp use on %s\n", |
261 | pci_name(dev->port)); | 238 | pci_name(dev->port)); |
262 | else if (pciehp_get_hp_hw_control_from_firmware(dev->port)) | 239 | else if (pciehp_acpi_slot_detection_check(dev->port)) |
263 | goto err_out_none; | 240 | goto err_out_none; |
264 | 241 | ||
265 | ctrl = pcie_init(dev); | 242 | ctrl = pcie_init(dev); |
@@ -364,18 +341,33 @@ static int __init pcied_init(void) | |||
364 | { | 341 | { |
365 | int retval = 0; | 342 | int retval = 0; |
366 | 343 | ||
344 | pciehp_wq = alloc_workqueue("pciehp", 0, 0); | ||
345 | if (!pciehp_wq) | ||
346 | return -ENOMEM; | ||
347 | |||
348 | pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0); | ||
349 | if (!pciehp_ordered_wq) { | ||
350 | destroy_workqueue(pciehp_wq); | ||
351 | return -ENOMEM; | ||
352 | } | ||
353 | |||
367 | pciehp_firmware_init(); | 354 | pciehp_firmware_init(); |
368 | retval = pcie_port_service_register(&hpdriver_portdrv); | 355 | retval = pcie_port_service_register(&hpdriver_portdrv); |
369 | dbg("pcie_port_service_register = %d\n", retval); | 356 | dbg("pcie_port_service_register = %d\n", retval); |
370 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | 357 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); |
371 | if (retval) | 358 | if (retval) { |
359 | destroy_workqueue(pciehp_ordered_wq); | ||
360 | destroy_workqueue(pciehp_wq); | ||
372 | dbg("Failure to register service\n"); | 361 | dbg("Failure to register service\n"); |
362 | } | ||
373 | return retval; | 363 | return retval; |
374 | } | 364 | } |
375 | 365 | ||
376 | static void __exit pcied_cleanup(void) | 366 | static void __exit pcied_cleanup(void) |
377 | { | 367 | { |
378 | dbg("unload_pciehpd()\n"); | 368 | dbg("unload_pciehpd()\n"); |
369 | destroy_workqueue(pciehp_ordered_wq); | ||
370 | destroy_workqueue(pciehp_wq); | ||
379 | pcie_port_service_unregister(&hpdriver_portdrv); | 371 | pcie_port_service_unregister(&hpdriver_portdrv); |
380 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); | 372 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); |
381 | } | 373 | } |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index d6ac1b261dd9..085dbb5fc168 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -30,8 +30,8 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/slab.h> | ||
33 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
34 | #include <linux/workqueue.h> | ||
35 | #include "../pci.h" | 35 | #include "../pci.h" |
36 | #include "pciehp.h" | 36 | #include "pciehp.h" |
37 | 37 | ||
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
49 | info->p_slot = p_slot; | 49 | info->p_slot = p_slot; |
50 | INIT_WORK(&info->work, interrupt_event_handler); | 50 | INIT_WORK(&info->work, interrupt_event_handler); |
51 | 51 | ||
52 | schedule_work(&info->work); | 52 | queue_work(pciehp_wq, &info->work); |
53 | 53 | ||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
@@ -341,9 +341,10 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
341 | p_slot->state = POWERON_STATE; | 341 | p_slot->state = POWERON_STATE; |
342 | break; | 342 | break; |
343 | default: | 343 | default: |
344 | kfree(info); | ||
344 | goto out; | 345 | goto out; |
345 | } | 346 | } |
346 | queue_work(pciehp_wq, &info->work); | 347 | queue_work(pciehp_ordered_wq, &info->work); |
347 | out: | 348 | out: |
348 | mutex_unlock(&p_slot->lock); | 349 | mutex_unlock(&p_slot->lock); |
349 | } | 350 | } |
@@ -376,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
376 | if (ATTN_LED(ctrl)) | 377 | if (ATTN_LED(ctrl)) |
377 | pciehp_set_attention_status(p_slot, 0); | 378 | pciehp_set_attention_status(p_slot, 0); |
378 | 379 | ||
379 | schedule_delayed_work(&p_slot->work, 5*HZ); | 380 | queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); |
380 | break; | 381 | break; |
381 | case BLINKINGOFF_STATE: | 382 | case BLINKINGOFF_STATE: |
382 | case BLINKINGON_STATE: | 383 | case BLINKINGON_STATE: |
@@ -438,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot) | |||
438 | else | 439 | else |
439 | p_slot->state = POWERON_STATE; | 440 | p_slot->state = POWERON_STATE; |
440 | 441 | ||
441 | queue_work(pciehp_wq, &info->work); | 442 | queue_work(pciehp_ordered_wq, &info->work); |
442 | } | 443 | } |
443 | 444 | ||
444 | static void interrupt_event_handler(struct work_struct *work) | 445 | static void interrupt_event_handler(struct work_struct *work) |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 10040d58c8ef..50a23da5d24d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -36,12 +36,11 @@ | |||
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
38 | #include <linux/time.h> | 38 | #include <linux/time.h> |
39 | #include <linux/slab.h> | ||
39 | 40 | ||
40 | #include "../pci.h" | 41 | #include "../pci.h" |
41 | #include "pciehp.h" | 42 | #include "pciehp.h" |
42 | 43 | ||
43 | static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); | ||
44 | |||
45 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) | 44 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) |
46 | { | 45 | { |
47 | struct pci_dev *dev = ctrl->pcie->port; | 46 | struct pci_dev *dev = ctrl->pcie->port; |
@@ -492,6 +491,7 @@ int pciehp_power_on_slot(struct slot * slot) | |||
492 | u16 slot_cmd; | 491 | u16 slot_cmd; |
493 | u16 cmd_mask; | 492 | u16 cmd_mask; |
494 | u16 slot_status; | 493 | u16 slot_status; |
494 | u16 lnk_status; | ||
495 | int retval = 0; | 495 | int retval = 0; |
496 | 496 | ||
497 | /* Clear sticky power-fault bit from previous power failures */ | 497 | /* Clear sticky power-fault bit from previous power failures */ |
@@ -523,6 +523,14 @@ int pciehp_power_on_slot(struct slot * slot) | |||
523 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 523 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
524 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 524 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
525 | 525 | ||
526 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
527 | if (retval) { | ||
528 | ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n", | ||
529 | __func__); | ||
530 | return retval; | ||
531 | } | ||
532 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); | ||
533 | |||
526 | return retval; | 534 | return retval; |
527 | } | 535 | } |
528 | 536 | ||
@@ -610,37 +618,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
610 | return IRQ_HANDLED; | 618 | return IRQ_HANDLED; |
611 | } | 619 | } |
612 | 620 | ||
613 | int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value) | ||
614 | { | ||
615 | struct controller *ctrl = slot->ctrl; | ||
616 | enum pcie_link_speed lnk_speed; | ||
617 | u32 lnk_cap; | ||
618 | int retval = 0; | ||
619 | |||
620 | retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); | ||
621 | if (retval) { | ||
622 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | ||
623 | return retval; | ||
624 | } | ||
625 | |||
626 | switch (lnk_cap & 0x000F) { | ||
627 | case 1: | ||
628 | lnk_speed = PCIE_2_5GB; | ||
629 | break; | ||
630 | case 2: | ||
631 | lnk_speed = PCIE_5_0GB; | ||
632 | break; | ||
633 | default: | ||
634 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | *value = lnk_speed; | ||
639 | ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); | ||
640 | |||
641 | return retval; | ||
642 | } | ||
643 | |||
644 | int pciehp_get_max_lnk_width(struct slot *slot, | 621 | int pciehp_get_max_lnk_width(struct slot *slot, |
645 | enum pcie_link_width *value) | 622 | enum pcie_link_width *value) |
646 | { | 623 | { |
@@ -691,38 +668,6 @@ int pciehp_get_max_lnk_width(struct slot *slot, | |||
691 | return retval; | 668 | return retval; |
692 | } | 669 | } |
693 | 670 | ||
694 | int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value) | ||
695 | { | ||
696 | struct controller *ctrl = slot->ctrl; | ||
697 | enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; | ||
698 | int retval = 0; | ||
699 | u16 lnk_status; | ||
700 | |||
701 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
702 | if (retval) { | ||
703 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", | ||
704 | __func__); | ||
705 | return retval; | ||
706 | } | ||
707 | |||
708 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { | ||
709 | case 1: | ||
710 | lnk_speed = PCIE_2_5GB; | ||
711 | break; | ||
712 | case 2: | ||
713 | lnk_speed = PCIE_5_0GB; | ||
714 | break; | ||
715 | default: | ||
716 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | ||
717 | break; | ||
718 | } | ||
719 | |||
720 | *value = lnk_speed; | ||
721 | ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); | ||
722 | |||
723 | return retval; | ||
724 | } | ||
725 | |||
726 | int pciehp_get_cur_lnk_width(struct slot *slot, | 671 | int pciehp_get_cur_lnk_width(struct slot *slot, |
727 | enum pcie_link_width *value) | 672 | enum pcie_link_width *value) |
728 | { | 673 | { |
@@ -858,8 +803,8 @@ static void pcie_cleanup_slot(struct controller *ctrl) | |||
858 | { | 803 | { |
859 | struct slot *slot = ctrl->slot; | 804 | struct slot *slot = ctrl->slot; |
860 | cancel_delayed_work(&slot->work); | 805 | cancel_delayed_work(&slot->work); |
861 | flush_scheduled_work(); | ||
862 | flush_workqueue(pciehp_wq); | 806 | flush_workqueue(pciehp_wq); |
807 | flush_workqueue(pciehp_ordered_wq); | ||
863 | kfree(slot); | 808 | kfree(slot); |
864 | } | 809 | } |
865 | 810 | ||
@@ -886,9 +831,8 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
886 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 831 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
887 | if (!pci_resource_len(pdev, i)) | 832 | if (!pci_resource_len(pdev, i)) |
888 | continue; | 833 | continue; |
889 | ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n", | 834 | ctrl_info(ctrl, " PCI resource [%d] : %pR\n", |
890 | i, (unsigned long long)pci_resource_len(pdev, i), | 835 | i, &pdev->resource[i]); |
891 | (unsigned long long)pci_resource_start(pdev, i)); | ||
892 | } | 836 | } |
893 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); | 837 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); |
894 | ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); | 838 | ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); |
@@ -966,16 +910,6 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
966 | /* Disable sotfware notification */ | 910 | /* Disable sotfware notification */ |
967 | pcie_disable_notification(ctrl); | 911 | pcie_disable_notification(ctrl); |
968 | 912 | ||
969 | /* | ||
970 | * If this is the first controller to be initialized, | ||
971 | * initialize the pciehp work queue | ||
972 | */ | ||
973 | if (atomic_add_return(1, &pciehp_num_controllers) == 1) { | ||
974 | pciehp_wq = create_singlethread_workqueue("pciehpd"); | ||
975 | if (!pciehp_wq) | ||
976 | goto abort_ctrl; | ||
977 | } | ||
978 | |||
979 | ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", | 913 | ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", |
980 | pdev->vendor, pdev->device, pdev->subsystem_vendor, | 914 | pdev->vendor, pdev->device, pdev->subsystem_vendor, |
981 | pdev->subsystem_device); | 915 | pdev->subsystem_device); |
@@ -995,11 +929,5 @@ void pciehp_release_ctrl(struct controller *ctrl) | |||
995 | { | 929 | { |
996 | pcie_shutdown_notification(ctrl); | 930 | pcie_shutdown_notification(ctrl); |
997 | pcie_cleanup_slot(ctrl); | 931 | pcie_cleanup_slot(ctrl); |
998 | /* | ||
999 | * If this is the last controller to be released, destroy the | ||
1000 | * pciehp work queue | ||
1001 | */ | ||
1002 | if (atomic_dec_and_test(&pciehp_num_controllers)) | ||
1003 | destroy_workqueue(pciehp_wq); | ||
1004 | kfree(ctrl); | 932 | kfree(ctrl); |
1005 | } | 933 | } |
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 21733108adde..a4031dfe938e 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -53,17 +53,15 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev) | |||
53 | busnr = pci_scan_bridge(parent, dev, busnr, pass); | 53 | busnr = pci_scan_bridge(parent, dev, busnr, pass); |
54 | if (!dev->subordinate) | 54 | if (!dev->subordinate) |
55 | return -1; | 55 | return -1; |
56 | pci_bus_size_bridges(dev->subordinate); | 56 | |
57 | pci_bus_assign_resources(parent); | ||
58 | pci_enable_bridges(parent); | ||
59 | pci_bus_add_devices(parent); | ||
60 | return 0; | 57 | return 0; |
61 | } | 58 | } |
62 | 59 | ||
63 | int pciehp_configure_device(struct slot *p_slot) | 60 | int pciehp_configure_device(struct slot *p_slot) |
64 | { | 61 | { |
65 | struct pci_dev *dev; | 62 | struct pci_dev *dev; |
66 | struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; | 63 | struct pci_dev *bridge = p_slot->ctrl->pcie->port; |
64 | struct pci_bus *parent = bridge->subordinate; | ||
67 | int num, fn; | 65 | int num, fn; |
68 | struct controller *ctrl = p_slot->ctrl; | 66 | struct controller *ctrl = p_slot->ctrl; |
69 | 67 | ||
@@ -86,22 +84,29 @@ int pciehp_configure_device(struct slot *p_slot) | |||
86 | dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); | 84 | dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); |
87 | if (!dev) | 85 | if (!dev) |
88 | continue; | 86 | continue; |
89 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
90 | ctrl_err(ctrl, "Cannot hot-add display device %s\n", | ||
91 | pci_name(dev)); | ||
92 | pci_dev_put(dev); | ||
93 | continue; | ||
94 | } | ||
95 | if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || | 87 | if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || |
96 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { | 88 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { |
97 | pciehp_add_bridge(dev); | 89 | pciehp_add_bridge(dev); |
98 | } | 90 | } |
91 | pci_dev_put(dev); | ||
92 | } | ||
93 | |||
94 | pci_assign_unassigned_bridge_resources(bridge); | ||
95 | |||
96 | for (fn = 0; fn < 8; fn++) { | ||
97 | dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); | ||
98 | if (!dev) | ||
99 | continue; | ||
100 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
101 | pci_dev_put(dev); | ||
102 | continue; | ||
103 | } | ||
99 | pci_configure_slot(dev); | 104 | pci_configure_slot(dev); |
100 | pci_dev_put(dev); | 105 | pci_dev_put(dev); |
101 | } | 106 | } |
102 | 107 | ||
103 | pci_bus_assign_resources(parent); | ||
104 | pci_bus_add_devices(parent); | 108 | pci_bus_add_devices(parent); |
109 | |||
105 | return 0; | 110 | return 0; |
106 | } | 111 | } |
107 | 112 | ||
@@ -122,15 +127,9 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
122 | presence = 0; | 127 | presence = 0; |
123 | 128 | ||
124 | for (j = 0; j < 8; j++) { | 129 | for (j = 0; j < 8; j++) { |
125 | struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j)); | 130 | struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j)); |
126 | if (!temp) | 131 | if (!temp) |
127 | continue; | 132 | continue; |
128 | if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
129 | ctrl_err(ctrl, "Cannot remove display device %s\n", | ||
130 | pci_name(temp)); | ||
131 | pci_dev_put(temp); | ||
132 | continue; | ||
133 | } | ||
134 | if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { | 133 | if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { |
135 | pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); | 134 | pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); |
136 | if (bctl & PCI_BRIDGE_CTL_VGA) { | 135 | if (bctl & PCI_BRIDGE_CTL_VGA) { |
@@ -138,7 +137,8 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
138 | "Cannot remove display device %s\n", | 137 | "Cannot remove display device %s\n", |
139 | pci_name(temp)); | 138 | pci_name(temp)); |
140 | pci_dev_put(temp); | 139 | pci_dev_put(temp); |
141 | continue; | 140 | rc = -EINVAL; |
141 | break; | ||
142 | } | 142 | } |
143 | } | 143 | } |
144 | pci_remove_bus_device(temp); | 144 | pci_remove_bus_device(temp); |
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 4e3e0382c16e..083034710fa6 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/vmalloc.h> | ||
23 | 24 | ||
24 | #include <asm/pci-bridge.h> | 25 | #include <asm/pci-bridge.h> |
25 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
@@ -430,6 +431,8 @@ int dlpar_remove_slot(char *drc_name) | |||
430 | rc = dlpar_remove_pci_slot(drc_name, dn); | 431 | rc = dlpar_remove_pci_slot(drc_name, dn); |
431 | break; | 432 | break; |
432 | } | 433 | } |
434 | vm_unmap_aliases(); | ||
435 | |||
433 | printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); | 436 | printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); |
434 | exit: | 437 | exit: |
435 | mutex_unlock(&rpadlpar_mutex); | 438 | mutex_unlock(&rpadlpar_mutex); |
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index c159223389ec..ef7411c660b9 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c | |||
@@ -27,9 +27,9 @@ | |||
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/pci_hotplug.h> | 29 | #include <linux/pci_hotplug.h> |
30 | #include <linux/slab.h> | ||
31 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
32 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/vmalloc.h> | ||
33 | #include <asm/eeh.h> /* for eeh_add_device() */ | 33 | #include <asm/eeh.h> /* for eeh_add_device() */ |
34 | #include <asm/rtas.h> /* rtas_call */ | 34 | #include <asm/rtas.h> /* rtas_call */ |
35 | #include <asm/pci-bridge.h> /* for pci_controller */ | 35 | #include <asm/pci-bridge.h> /* for pci_controller */ |
@@ -130,10 +130,9 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value) | |||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 133 | static enum pci_bus_speed get_max_bus_speed(struct slot *slot) |
134 | { | 134 | { |
135 | struct slot *slot = (struct slot *)hotplug_slot->private; | 135 | enum pci_bus_speed speed; |
136 | |||
137 | switch (slot->type) { | 136 | switch (slot->type) { |
138 | case 1: | 137 | case 1: |
139 | case 2: | 138 | case 2: |
@@ -141,30 +140,30 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe | |||
141 | case 4: | 140 | case 4: |
142 | case 5: | 141 | case 5: |
143 | case 6: | 142 | case 6: |
144 | *value = PCI_SPEED_33MHz; /* speed for case 1-6 */ | 143 | speed = PCI_SPEED_33MHz; /* speed for case 1-6 */ |
145 | break; | 144 | break; |
146 | case 7: | 145 | case 7: |
147 | case 8: | 146 | case 8: |
148 | *value = PCI_SPEED_66MHz; | 147 | speed = PCI_SPEED_66MHz; |
149 | break; | 148 | break; |
150 | case 11: | 149 | case 11: |
151 | case 14: | 150 | case 14: |
152 | *value = PCI_SPEED_66MHz_PCIX; | 151 | speed = PCI_SPEED_66MHz_PCIX; |
153 | break; | 152 | break; |
154 | case 12: | 153 | case 12: |
155 | case 15: | 154 | case 15: |
156 | *value = PCI_SPEED_100MHz_PCIX; | 155 | speed = PCI_SPEED_100MHz_PCIX; |
157 | break; | 156 | break; |
158 | case 13: | 157 | case 13: |
159 | case 16: | 158 | case 16: |
160 | *value = PCI_SPEED_133MHz_PCIX; | 159 | speed = PCI_SPEED_133MHz_PCIX; |
161 | break; | 160 | break; |
162 | default: | 161 | default: |
163 | *value = PCI_SPEED_UNKNOWN; | 162 | speed = PCI_SPEED_UNKNOWN; |
164 | break; | 163 | break; |
165 | |||
166 | } | 164 | } |
167 | return 0; | 165 | |
166 | return speed; | ||
168 | } | 167 | } |
169 | 168 | ||
170 | static int get_children_props(struct device_node *dn, const int **drc_indexes, | 169 | static int get_children_props(struct device_node *dn, const int **drc_indexes, |
@@ -408,6 +407,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
408 | slot->state = NOT_VALID; | 407 | slot->state = NOT_VALID; |
409 | return -EINVAL; | 408 | return -EINVAL; |
410 | } | 409 | } |
410 | |||
411 | slot->bus->max_bus_speed = get_max_bus_speed(slot); | ||
411 | return 0; | 412 | return 0; |
412 | } | 413 | } |
413 | 414 | ||
@@ -418,6 +419,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
418 | return -EINVAL; | 419 | return -EINVAL; |
419 | 420 | ||
420 | pcibios_remove_pci_devices(slot->bus); | 421 | pcibios_remove_pci_devices(slot->bus); |
422 | vm_unmap_aliases(); | ||
423 | |||
421 | slot->state = NOT_CONFIGURED; | 424 | slot->state = NOT_CONFIGURED; |
422 | return 0; | 425 | return 0; |
423 | } | 426 | } |
@@ -429,7 +432,6 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { | |||
429 | .get_power_status = get_power_status, | 432 | .get_power_status = get_power_status, |
430 | .get_attention_status = get_attention_status, | 433 | .get_attention_status = get_attention_status, |
431 | .get_adapter_status = get_adapter_status, | 434 | .get_adapter_status = get_adapter_status, |
432 | .get_max_bus_speed = get_max_bus_speed, | ||
433 | }; | 435 | }; |
434 | 436 | ||
435 | module_init(rpaphp_init); | 437 | module_init(rpaphp_init); |
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 2ea9cf1a8d02..b283bbea6d24 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c | |||
@@ -24,7 +24,6 @@ | |||
24 | */ | 24 | */ |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/kobject.h> | ||
28 | #include <linux/sysfs.h> | 27 | #include <linux/sysfs.h> |
29 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
30 | #include <linux/string.h> | 29 | #include <linux/string.h> |
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index 8aebe1e9d3d6..72d507b6a2aa 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/pci_hotplug.h> | 16 | #include <linux/pci_hotplug.h> |
17 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
18 | #include <linux/slab.h> | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
20 | 21 | ||
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 8e210cd76e55..e0c90e643b5f 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #include <linux/sched.h> /* signal_pending(), struct timer_list */ | 36 | #include <linux/sched.h> /* signal_pending(), struct timer_list */ |
37 | #include <linux/mutex.h> | 37 | #include <linux/mutex.h> |
38 | #include <linux/workqueue.h> | ||
38 | 39 | ||
39 | #if !defined(MODULE) | 40 | #if !defined(MODULE) |
40 | #define MY_NAME "shpchp" | 41 | #define MY_NAME "shpchp" |
@@ -46,6 +47,7 @@ extern int shpchp_poll_mode; | |||
46 | extern int shpchp_poll_time; | 47 | extern int shpchp_poll_time; |
47 | extern int shpchp_debug; | 48 | extern int shpchp_debug; |
48 | extern struct workqueue_struct *shpchp_wq; | 49 | extern struct workqueue_struct *shpchp_wq; |
50 | extern struct workqueue_struct *shpchp_ordered_wq; | ||
49 | 51 | ||
50 | #define dbg(format, arg...) \ | 52 | #define dbg(format, arg...) \ |
51 | do { \ | 53 | do { \ |
@@ -333,8 +335,6 @@ struct hpc_ops { | |||
333 | int (*set_attention_status)(struct slot *slot, u8 status); | 335 | int (*set_attention_status)(struct slot *slot, u8 status); |
334 | int (*get_latch_status)(struct slot *slot, u8 *status); | 336 | int (*get_latch_status)(struct slot *slot, u8 *status); |
335 | int (*get_adapter_status)(struct slot *slot, u8 *status); | 337 | int (*get_adapter_status)(struct slot *slot, u8 *status); |
336 | int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
337 | int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
338 | int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); | 338 | int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); |
339 | int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); | 339 | int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); |
340 | int (*get_prog_int)(struct slot *slot, u8 *prog_int); | 340 | int (*get_prog_int)(struct slot *slot, u8 *prog_int); |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 8a520a3d0f59..aca972bbfb4c 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -31,8 +31,8 @@ | |||
31 | #include <linux/moduleparam.h> | 31 | #include <linux/moduleparam.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include <linux/workqueue.h> | ||
36 | #include "shpchp.h" | 36 | #include "shpchp.h" |
37 | 37 | ||
38 | /* Global variables */ | 38 | /* Global variables */ |
@@ -40,6 +40,7 @@ int shpchp_debug; | |||
40 | int shpchp_poll_mode; | 40 | int shpchp_poll_mode; |
41 | int shpchp_poll_time; | 41 | int shpchp_poll_time; |
42 | struct workqueue_struct *shpchp_wq; | 42 | struct workqueue_struct *shpchp_wq; |
43 | struct workqueue_struct *shpchp_ordered_wq; | ||
43 | 44 | ||
44 | #define DRIVER_VERSION "0.4" | 45 | #define DRIVER_VERSION "0.4" |
45 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" | 46 | #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" |
@@ -65,8 +66,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); | |||
65 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); | 66 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); |
66 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); | 67 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); |
67 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | 68 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); |
68 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
69 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
70 | 69 | ||
71 | static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { | 70 | static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { |
72 | .set_attention_status = set_attention_status, | 71 | .set_attention_status = set_attention_status, |
@@ -76,8 +75,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { | |||
76 | .get_attention_status = get_attention_status, | 75 | .get_attention_status = get_attention_status, |
77 | .get_latch_status = get_latch_status, | 76 | .get_latch_status = get_latch_status, |
78 | .get_adapter_status = get_adapter_status, | 77 | .get_adapter_status = get_adapter_status, |
79 | .get_max_bus_speed = get_max_bus_speed, | ||
80 | .get_cur_bus_speed = get_cur_bus_speed, | ||
81 | }; | 78 | }; |
82 | 79 | ||
83 | /** | 80 | /** |
@@ -177,8 +174,8 @@ void cleanup_slots(struct controller *ctrl) | |||
177 | slot = list_entry(tmp, struct slot, slot_list); | 174 | slot = list_entry(tmp, struct slot, slot_list); |
178 | list_del(&slot->slot_list); | 175 | list_del(&slot->slot_list); |
179 | cancel_delayed_work(&slot->work); | 176 | cancel_delayed_work(&slot->work); |
180 | flush_scheduled_work(); | ||
181 | flush_workqueue(shpchp_wq); | 177 | flush_workqueue(shpchp_wq); |
178 | flush_workqueue(shpchp_ordered_wq); | ||
182 | pci_hp_deregister(slot->hotplug_slot); | 179 | pci_hp_deregister(slot->hotplug_slot); |
183 | } | 180 | } |
184 | } | 181 | } |
@@ -279,37 +276,6 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) | |||
279 | return 0; | 276 | return 0; |
280 | } | 277 | } |
281 | 278 | ||
282 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | ||
283 | enum pci_bus_speed *value) | ||
284 | { | ||
285 | struct slot *slot = get_slot(hotplug_slot); | ||
286 | int retval; | ||
287 | |||
288 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
289 | __func__, slot_name(slot)); | ||
290 | |||
291 | retval = slot->hpc_ops->get_max_bus_speed(slot, value); | ||
292 | if (retval < 0) | ||
293 | *value = PCI_SPEED_UNKNOWN; | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
299 | { | ||
300 | struct slot *slot = get_slot(hotplug_slot); | ||
301 | int retval; | ||
302 | |||
303 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
304 | __func__, slot_name(slot)); | ||
305 | |||
306 | retval = slot->hpc_ops->get_cur_bus_speed(slot, value); | ||
307 | if (retval < 0) | ||
308 | *value = PCI_SPEED_UNKNOWN; | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int is_shpc_capable(struct pci_dev *dev) | 279 | static int is_shpc_capable(struct pci_dev *dev) |
314 | { | 280 | { |
315 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == | 281 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == |
@@ -394,9 +360,23 @@ static int __init shpcd_init(void) | |||
394 | { | 360 | { |
395 | int retval = 0; | 361 | int retval = 0; |
396 | 362 | ||
363 | shpchp_wq = alloc_ordered_workqueue("shpchp", 0); | ||
364 | if (!shpchp_wq) | ||
365 | return -ENOMEM; | ||
366 | |||
367 | shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); | ||
368 | if (!shpchp_ordered_wq) { | ||
369 | destroy_workqueue(shpchp_wq); | ||
370 | return -ENOMEM; | ||
371 | } | ||
372 | |||
397 | retval = pci_register_driver(&shpc_driver); | 373 | retval = pci_register_driver(&shpc_driver); |
398 | dbg("%s: pci_register_driver = %d\n", __func__, retval); | 374 | dbg("%s: pci_register_driver = %d\n", __func__, retval); |
399 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | 375 | info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); |
376 | if (retval) { | ||
377 | destroy_workqueue(shpchp_ordered_wq); | ||
378 | destroy_workqueue(shpchp_wq); | ||
379 | } | ||
400 | return retval; | 380 | return retval; |
401 | } | 381 | } |
402 | 382 | ||
@@ -404,6 +384,8 @@ static void __exit shpcd_cleanup(void) | |||
404 | { | 384 | { |
405 | dbg("unload_shpchpd()\n"); | 385 | dbg("unload_shpchpd()\n"); |
406 | pci_unregister_driver(&shpc_driver); | 386 | pci_unregister_driver(&shpc_driver); |
387 | destroy_workqueue(shpchp_ordered_wq); | ||
388 | destroy_workqueue(shpchp_wq); | ||
407 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); | 389 | info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); |
408 | } | 390 | } |
409 | 391 | ||
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index b8ab2796e66a..b00b09bdd38a 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -30,8 +30,8 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/slab.h> | ||
33 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
34 | #include <linux/workqueue.h> | ||
35 | #include "../pci.h" | 35 | #include "../pci.h" |
36 | #include "shpchp.h" | 36 | #include "shpchp.h" |
37 | 37 | ||
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
51 | info->p_slot = p_slot; | 51 | info->p_slot = p_slot; |
52 | INIT_WORK(&info->work, interrupt_event_handler); | 52 | INIT_WORK(&info->work, interrupt_event_handler); |
53 | 53 | ||
54 | schedule_work(&info->work); | 54 | queue_work(shpchp_wq, &info->work); |
55 | 55 | ||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
@@ -285,17 +285,8 @@ static int board_added(struct slot *p_slot) | |||
285 | return WRONG_BUS_FREQUENCY; | 285 | return WRONG_BUS_FREQUENCY; |
286 | } | 286 | } |
287 | 287 | ||
288 | rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); | 288 | bsp = ctrl->pci_dev->bus->cur_bus_speed; |
289 | if (rc) { | 289 | msp = ctrl->pci_dev->bus->max_bus_speed; |
290 | ctrl_err(ctrl, "Can't get bus operation speed\n"); | ||
291 | return WRONG_BUS_FREQUENCY; | ||
292 | } | ||
293 | |||
294 | rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); | ||
295 | if (rc) { | ||
296 | ctrl_err(ctrl, "Can't get max bus operation speed\n"); | ||
297 | msp = bsp; | ||
298 | } | ||
299 | 290 | ||
300 | /* Check if there are other slots or devices on the same bus */ | 291 | /* Check if there are other slots or devices on the same bus */ |
301 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) | 292 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) |
@@ -462,9 +453,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) | |||
462 | p_slot->state = POWERON_STATE; | 453 | p_slot->state = POWERON_STATE; |
463 | break; | 454 | break; |
464 | default: | 455 | default: |
456 | kfree(info); | ||
465 | goto out; | 457 | goto out; |
466 | } | 458 | } |
467 | queue_work(shpchp_wq, &info->work); | 459 | queue_work(shpchp_ordered_wq, &info->work); |
468 | out: | 460 | out: |
469 | mutex_unlock(&p_slot->lock); | 461 | mutex_unlock(&p_slot->lock); |
470 | } | 462 | } |
@@ -512,7 +504,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
512 | p_slot->hpc_ops->green_led_blink(p_slot); | 504 | p_slot->hpc_ops->green_led_blink(p_slot); |
513 | p_slot->hpc_ops->set_attention_status(p_slot, 0); | 505 | p_slot->hpc_ops->set_attention_status(p_slot, 0); |
514 | 506 | ||
515 | schedule_delayed_work(&p_slot->work, 5*HZ); | 507 | queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); |
516 | break; | 508 | break; |
517 | case BLINKINGOFF_STATE: | 509 | case BLINKINGOFF_STATE: |
518 | case BLINKINGON_STATE: | 510 | case BLINKINGON_STATE: |
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 86dc39847769..36547f0ce305 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c | |||
@@ -113,7 +113,7 @@ | |||
113 | #define CON_PFAULT_INTR_MASK (1 << 28) | 113 | #define CON_PFAULT_INTR_MASK (1 << 28) |
114 | #define MRL_CHANGE_SERR_MASK (1 << 29) | 114 | #define MRL_CHANGE_SERR_MASK (1 << 29) |
115 | #define CON_PFAULT_SERR_MASK (1 << 30) | 115 | #define CON_PFAULT_SERR_MASK (1 << 30) |
116 | #define SLOT_REG_RSVDZ_MASK (1 << 15) | (7 << 21) | 116 | #define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21)) |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * SHPC Command Code definitnions | 119 | * SHPC Command Code definitnions |
@@ -179,8 +179,6 @@ | |||
179 | #define SLOT_EVENT_LATCH 0x2 | 179 | #define SLOT_EVENT_LATCH 0x2 |
180 | #define SLOT_SERR_INT_MASK 0x3 | 180 | #define SLOT_SERR_INT_MASK 0x3 |
181 | 181 | ||
182 | static atomic_t shpchp_num_controllers = ATOMIC_INIT(0); | ||
183 | |||
184 | static irqreturn_t shpc_isr(int irq, void *dev_id); | 182 | static irqreturn_t shpc_isr(int irq, void *dev_id); |
185 | static void start_int_poll_timer(struct controller *ctrl, int sec); | 183 | static void start_int_poll_timer(struct controller *ctrl, int sec); |
186 | static int hpc_check_cmd_status(struct controller *ctrl); | 184 | static int hpc_check_cmd_status(struct controller *ctrl); |
@@ -614,13 +612,6 @@ static void hpc_release_ctlr(struct controller *ctrl) | |||
614 | 612 | ||
615 | iounmap(ctrl->creg); | 613 | iounmap(ctrl->creg); |
616 | release_mem_region(ctrl->mmio_base, ctrl->mmio_size); | 614 | release_mem_region(ctrl->mmio_base, ctrl->mmio_size); |
617 | |||
618 | /* | ||
619 | * If this is the last controller to be released, destroy the | ||
620 | * shpchpd work queue | ||
621 | */ | ||
622 | if (atomic_dec_and_test(&shpchp_num_controllers)) | ||
623 | destroy_workqueue(shpchp_wq); | ||
624 | } | 615 | } |
625 | 616 | ||
626 | static int hpc_power_on_slot(struct slot * slot) | 617 | static int hpc_power_on_slot(struct slot * slot) |
@@ -660,6 +651,75 @@ static int hpc_slot_disable(struct slot * slot) | |||
660 | return retval; | 651 | return retval; |
661 | } | 652 | } |
662 | 653 | ||
654 | static int shpc_get_cur_bus_speed(struct controller *ctrl) | ||
655 | { | ||
656 | int retval = 0; | ||
657 | struct pci_bus *bus = ctrl->pci_dev->subordinate; | ||
658 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | ||
659 | u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); | ||
660 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | ||
661 | u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); | ||
662 | |||
663 | if ((pi == 1) && (speed_mode > 4)) { | ||
664 | retval = -ENODEV; | ||
665 | goto out; | ||
666 | } | ||
667 | |||
668 | switch (speed_mode) { | ||
669 | case 0x0: | ||
670 | bus_speed = PCI_SPEED_33MHz; | ||
671 | break; | ||
672 | case 0x1: | ||
673 | bus_speed = PCI_SPEED_66MHz; | ||
674 | break; | ||
675 | case 0x2: | ||
676 | bus_speed = PCI_SPEED_66MHz_PCIX; | ||
677 | break; | ||
678 | case 0x3: | ||
679 | bus_speed = PCI_SPEED_100MHz_PCIX; | ||
680 | break; | ||
681 | case 0x4: | ||
682 | bus_speed = PCI_SPEED_133MHz_PCIX; | ||
683 | break; | ||
684 | case 0x5: | ||
685 | bus_speed = PCI_SPEED_66MHz_PCIX_ECC; | ||
686 | break; | ||
687 | case 0x6: | ||
688 | bus_speed = PCI_SPEED_100MHz_PCIX_ECC; | ||
689 | break; | ||
690 | case 0x7: | ||
691 | bus_speed = PCI_SPEED_133MHz_PCIX_ECC; | ||
692 | break; | ||
693 | case 0x8: | ||
694 | bus_speed = PCI_SPEED_66MHz_PCIX_266; | ||
695 | break; | ||
696 | case 0x9: | ||
697 | bus_speed = PCI_SPEED_100MHz_PCIX_266; | ||
698 | break; | ||
699 | case 0xa: | ||
700 | bus_speed = PCI_SPEED_133MHz_PCIX_266; | ||
701 | break; | ||
702 | case 0xb: | ||
703 | bus_speed = PCI_SPEED_66MHz_PCIX_533; | ||
704 | break; | ||
705 | case 0xc: | ||
706 | bus_speed = PCI_SPEED_100MHz_PCIX_533; | ||
707 | break; | ||
708 | case 0xd: | ||
709 | bus_speed = PCI_SPEED_133MHz_PCIX_533; | ||
710 | break; | ||
711 | default: | ||
712 | retval = -ENODEV; | ||
713 | break; | ||
714 | } | ||
715 | |||
716 | out: | ||
717 | bus->cur_bus_speed = bus_speed; | ||
718 | dbg("Current bus speed = %d\n", bus_speed); | ||
719 | return retval; | ||
720 | } | ||
721 | |||
722 | |||
663 | static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) | 723 | static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) |
664 | { | 724 | { |
665 | int retval; | 725 | int retval; |
@@ -720,6 +780,8 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) | |||
720 | retval = shpc_write_cmd(slot, 0, cmd); | 780 | retval = shpc_write_cmd(slot, 0, cmd); |
721 | if (retval) | 781 | if (retval) |
722 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); | 782 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); |
783 | else | ||
784 | shpc_get_cur_bus_speed(ctrl); | ||
723 | 785 | ||
724 | return retval; | 786 | return retval; |
725 | } | 787 | } |
@@ -803,10 +865,10 @@ static irqreturn_t shpc_isr(int irq, void *dev_id) | |||
803 | return IRQ_HANDLED; | 865 | return IRQ_HANDLED; |
804 | } | 866 | } |
805 | 867 | ||
806 | static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) | 868 | static int shpc_get_max_bus_speed(struct controller *ctrl) |
807 | { | 869 | { |
808 | int retval = 0; | 870 | int retval = 0; |
809 | struct controller *ctrl = slot->ctrl; | 871 | struct pci_bus *bus = ctrl->pci_dev->subordinate; |
810 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | 872 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; |
811 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | 873 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); |
812 | u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); | 874 | u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); |
@@ -842,79 +904,12 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) | |||
842 | retval = -ENODEV; | 904 | retval = -ENODEV; |
843 | } | 905 | } |
844 | 906 | ||
845 | *value = bus_speed; | 907 | bus->max_bus_speed = bus_speed; |
846 | ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); | 908 | ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); |
847 | 909 | ||
848 | return retval; | 910 | return retval; |
849 | } | 911 | } |
850 | 912 | ||
851 | static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value) | ||
852 | { | ||
853 | int retval = 0; | ||
854 | struct controller *ctrl = slot->ctrl; | ||
855 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | ||
856 | u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); | ||
857 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | ||
858 | u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); | ||
859 | |||
860 | if ((pi == 1) && (speed_mode > 4)) { | ||
861 | *value = PCI_SPEED_UNKNOWN; | ||
862 | return -ENODEV; | ||
863 | } | ||
864 | |||
865 | switch (speed_mode) { | ||
866 | case 0x0: | ||
867 | *value = PCI_SPEED_33MHz; | ||
868 | break; | ||
869 | case 0x1: | ||
870 | *value = PCI_SPEED_66MHz; | ||
871 | break; | ||
872 | case 0x2: | ||
873 | *value = PCI_SPEED_66MHz_PCIX; | ||
874 | break; | ||
875 | case 0x3: | ||
876 | *value = PCI_SPEED_100MHz_PCIX; | ||
877 | break; | ||
878 | case 0x4: | ||
879 | *value = PCI_SPEED_133MHz_PCIX; | ||
880 | break; | ||
881 | case 0x5: | ||
882 | *value = PCI_SPEED_66MHz_PCIX_ECC; | ||
883 | break; | ||
884 | case 0x6: | ||
885 | *value = PCI_SPEED_100MHz_PCIX_ECC; | ||
886 | break; | ||
887 | case 0x7: | ||
888 | *value = PCI_SPEED_133MHz_PCIX_ECC; | ||
889 | break; | ||
890 | case 0x8: | ||
891 | *value = PCI_SPEED_66MHz_PCIX_266; | ||
892 | break; | ||
893 | case 0x9: | ||
894 | *value = PCI_SPEED_100MHz_PCIX_266; | ||
895 | break; | ||
896 | case 0xa: | ||
897 | *value = PCI_SPEED_133MHz_PCIX_266; | ||
898 | break; | ||
899 | case 0xb: | ||
900 | *value = PCI_SPEED_66MHz_PCIX_533; | ||
901 | break; | ||
902 | case 0xc: | ||
903 | *value = PCI_SPEED_100MHz_PCIX_533; | ||
904 | break; | ||
905 | case 0xd: | ||
906 | *value = PCI_SPEED_133MHz_PCIX_533; | ||
907 | break; | ||
908 | default: | ||
909 | *value = PCI_SPEED_UNKNOWN; | ||
910 | retval = -ENODEV; | ||
911 | break; | ||
912 | } | ||
913 | |||
914 | ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed); | ||
915 | return retval; | ||
916 | } | ||
917 | |||
918 | static struct hpc_ops shpchp_hpc_ops = { | 913 | static struct hpc_ops shpchp_hpc_ops = { |
919 | .power_on_slot = hpc_power_on_slot, | 914 | .power_on_slot = hpc_power_on_slot, |
920 | .slot_enable = hpc_slot_enable, | 915 | .slot_enable = hpc_slot_enable, |
@@ -926,8 +921,6 @@ static struct hpc_ops shpchp_hpc_ops = { | |||
926 | .get_latch_status = hpc_get_latch_status, | 921 | .get_latch_status = hpc_get_latch_status, |
927 | .get_adapter_status = hpc_get_adapter_status, | 922 | .get_adapter_status = hpc_get_adapter_status, |
928 | 923 | ||
929 | .get_max_bus_speed = hpc_get_max_bus_speed, | ||
930 | .get_cur_bus_speed = hpc_get_cur_bus_speed, | ||
931 | .get_adapter_speed = hpc_get_adapter_speed, | 924 | .get_adapter_speed = hpc_get_adapter_speed, |
932 | .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, | 925 | .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, |
933 | .get_prog_int = hpc_get_prog_int, | 926 | .get_prog_int = hpc_get_prog_int, |
@@ -1075,9 +1068,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
1075 | 1068 | ||
1076 | rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, | 1069 | rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, |
1077 | MY_NAME, (void *)ctrl); | 1070 | MY_NAME, (void *)ctrl); |
1078 | ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n", | 1071 | ctrl_dbg(ctrl, "request_irq %d (returns %d)\n", |
1079 | ctrl->pci_dev->irq, | 1072 | ctrl->pci_dev->irq, rc); |
1080 | atomic_read(&shpchp_num_controllers), rc); | ||
1081 | if (rc) { | 1073 | if (rc) { |
1082 | ctrl_err(ctrl, "Can't get irq %d for the hotplug " | 1074 | ctrl_err(ctrl, "Can't get irq %d for the hotplug " |
1083 | "controller\n", ctrl->pci_dev->irq); | 1075 | "controller\n", ctrl->pci_dev->irq); |
@@ -1086,17 +1078,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
1086 | } | 1078 | } |
1087 | ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); | 1079 | ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); |
1088 | 1080 | ||
1089 | /* | 1081 | shpc_get_max_bus_speed(ctrl); |
1090 | * If this is the first controller to be initialized, | 1082 | shpc_get_cur_bus_speed(ctrl); |
1091 | * initialize the shpchpd work queue | ||
1092 | */ | ||
1093 | if (atomic_add_return(1, &shpchp_num_controllers) == 1) { | ||
1094 | shpchp_wq = create_singlethread_workqueue("shpchpd"); | ||
1095 | if (!shpchp_wq) { | ||
1096 | rc = -ENOMEM; | ||
1097 | goto abort_iounmap; | ||
1098 | } | ||
1099 | } | ||
1100 | 1083 | ||
1101 | /* | 1084 | /* |
1102 | * Unmask all event interrupts of all slots | 1085 | * Unmask all event interrupts of all slots |
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 8c3d3219f227..a2ccfcd3c298 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -60,12 +60,6 @@ int __ref shpchp_configure_device(struct slot *p_slot) | |||
60 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn)); | 60 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn)); |
61 | if (!dev) | 61 | if (!dev) |
62 | continue; | 62 | continue; |
63 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
64 | ctrl_err(ctrl, "Cannot hot-add display device %s\n", | ||
65 | pci_name(dev)); | ||
66 | pci_dev_put(dev); | ||
67 | continue; | ||
68 | } | ||
69 | if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || | 63 | if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || |
70 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { | 64 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { |
71 | /* Find an unused bus number for the new bridge */ | 65 | /* Find an unused bus number for the new bridge */ |
@@ -114,17 +108,11 @@ int shpchp_unconfigure_device(struct slot *p_slot) | |||
114 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", | 108 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", |
115 | __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); | 109 | __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); |
116 | 110 | ||
117 | for (j=0; j<8 ; j++) { | 111 | for (j = 0; j < 8 ; j++) { |
118 | struct pci_dev* temp = pci_get_slot(parent, | 112 | struct pci_dev *temp = pci_get_slot(parent, |
119 | (p_slot->device << 3) | j); | 113 | (p_slot->device << 3) | j); |
120 | if (!temp) | 114 | if (!temp) |
121 | continue; | 115 | continue; |
122 | if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
123 | ctrl_err(ctrl, "Cannot remove display device %s\n", | ||
124 | pci_name(temp)); | ||
125 | pci_dev_put(temp); | ||
126 | continue; | ||
127 | } | ||
128 | if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) { | 116 | if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) { |
129 | pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); | 117 | pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); |
130 | if (bctl & PCI_BRIDGE_CTL_VGA) { | 118 | if (bctl & PCI_BRIDGE_CTL_VGA) { |
@@ -132,7 +120,8 @@ int shpchp_unconfigure_device(struct slot *p_slot) | |||
132 | "Cannot remove display device %s\n", | 120 | "Cannot remove display device %s\n", |
133 | pci_name(temp)); | 121 | pci_name(temp)); |
134 | pci_dev_put(temp); | 122 | pci_dev_put(temp); |
135 | continue; | 123 | rc = -EINVAL; |
124 | break; | ||
136 | } | 125 | } |
137 | } | 126 | } |
138 | pci_remove_bus_device(temp); | 127 | pci_remove_bus_device(temp); |
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c index 29fa9d26adae..071b7dc0094b 100644 --- a/drivers/pci/hotplug/shpchp_sysfs.c +++ b/drivers/pci/hotplug/shpchp_sysfs.c | |||
@@ -47,8 +47,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
47 | bus = pdev->subordinate; | 47 | bus = pdev->subordinate; |
48 | 48 | ||
49 | out += sprintf(buf, "Free resources: memory\n"); | 49 | out += sprintf(buf, "Free resources: memory\n"); |
50 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 50 | pci_bus_for_each_resource(bus, res, index) { |
51 | res = bus->resource[index]; | ||
52 | if (res && (res->flags & IORESOURCE_MEM) && | 51 | if (res && (res->flags & IORESOURCE_MEM) && |
53 | !(res->flags & IORESOURCE_PREFETCH)) { | 52 | !(res->flags & IORESOURCE_PREFETCH)) { |
54 | out += sprintf(out, "start = %8.8llx, " | 53 | out += sprintf(out, "start = %8.8llx, " |
@@ -58,8 +57,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
58 | } | 57 | } |
59 | } | 58 | } |
60 | out += sprintf(out, "Free resources: prefetchable memory\n"); | 59 | out += sprintf(out, "Free resources: prefetchable memory\n"); |
61 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 60 | pci_bus_for_each_resource(bus, res, index) { |
62 | res = bus->resource[index]; | ||
63 | if (res && (res->flags & IORESOURCE_MEM) && | 61 | if (res && (res->flags & IORESOURCE_MEM) && |
64 | (res->flags & IORESOURCE_PREFETCH)) { | 62 | (res->flags & IORESOURCE_PREFETCH)) { |
65 | out += sprintf(out, "start = %8.8llx, " | 63 | out += sprintf(out, "start = %8.8llx, " |
@@ -69,8 +67,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
69 | } | 67 | } |
70 | } | 68 | } |
71 | out += sprintf(out, "Free resources: IO\n"); | 69 | out += sprintf(out, "Free resources: IO\n"); |
72 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 70 | pci_bus_for_each_resource(bus, res, index) { |
73 | res = bus->resource[index]; | ||
74 | if (res && (res->flags & IORESOURCE_IO)) { | 71 | if (res && (res->flags & IORESOURCE_IO)) { |
75 | out += sprintf(out, "start = %8.8llx, " | 72 | out += sprintf(out, "start = %8.8llx, " |
76 | "length = %8.8llx\n", | 73 | "length = %8.8llx\n", |
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 737a1c44b07a..834842aa5bbf 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/gfp.h> | ||
14 | #include <linux/htirq.h> | 13 | #include <linux/htirq.h> |
15 | 14 | ||
16 | /* Global ht irq lock. | 15 | /* Global ht irq lock. |
@@ -58,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | |||
58 | *msg = cfg->msg; | 57 | *msg = cfg->msg; |
59 | } | 58 | } |
60 | 59 | ||
61 | void mask_ht_irq(unsigned int irq) | 60 | void mask_ht_irq(struct irq_data *data) |
62 | { | 61 | { |
63 | struct ht_irq_cfg *cfg; | 62 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); |
64 | struct ht_irq_msg msg; | 63 | struct ht_irq_msg msg = cfg->msg; |
65 | |||
66 | cfg = get_irq_data(irq); | ||
67 | 64 | ||
68 | msg = cfg->msg; | ||
69 | msg.address_lo |= 1; | 65 | msg.address_lo |= 1; |
70 | write_ht_irq_msg(irq, &msg); | 66 | write_ht_irq_msg(data->irq, &msg); |
71 | } | 67 | } |
72 | 68 | ||
73 | void unmask_ht_irq(unsigned int irq) | 69 | void unmask_ht_irq(struct irq_data *data) |
74 | { | 70 | { |
75 | struct ht_irq_cfg *cfg; | 71 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); |
76 | struct ht_irq_msg msg; | 72 | struct ht_irq_msg msg = cfg->msg; |
77 | |||
78 | cfg = get_irq_data(irq); | ||
79 | 73 | ||
80 | msg = cfg->msg; | ||
81 | msg.address_lo &= ~1; | 74 | msg.address_lo &= ~1; |
82 | write_ht_irq_msg(irq, &msg); | 75 | write_ht_irq_msg(data->irq, &msg); |
83 | } | 76 | } |
84 | 77 | ||
85 | /** | 78 | /** |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 417312528ddf..4789f8e8bf7a 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -71,6 +71,49 @@ | |||
71 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) | 71 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) |
72 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) | 72 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) |
73 | 73 | ||
74 | /* page table handling */ | ||
75 | #define LEVEL_STRIDE (9) | ||
76 | #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) | ||
77 | |||
78 | static inline int agaw_to_level(int agaw) | ||
79 | { | ||
80 | return agaw + 2; | ||
81 | } | ||
82 | |||
83 | static inline int agaw_to_width(int agaw) | ||
84 | { | ||
85 | return 30 + agaw * LEVEL_STRIDE; | ||
86 | } | ||
87 | |||
88 | static inline int width_to_agaw(int width) | ||
89 | { | ||
90 | return (width - 30) / LEVEL_STRIDE; | ||
91 | } | ||
92 | |||
93 | static inline unsigned int level_to_offset_bits(int level) | ||
94 | { | ||
95 | return (level - 1) * LEVEL_STRIDE; | ||
96 | } | ||
97 | |||
98 | static inline int pfn_level_offset(unsigned long pfn, int level) | ||
99 | { | ||
100 | return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; | ||
101 | } | ||
102 | |||
103 | static inline unsigned long level_mask(int level) | ||
104 | { | ||
105 | return -1UL << level_to_offset_bits(level); | ||
106 | } | ||
107 | |||
108 | static inline unsigned long level_size(int level) | ||
109 | { | ||
110 | return 1UL << level_to_offset_bits(level); | ||
111 | } | ||
112 | |||
113 | static inline unsigned long align_to_level(unsigned long pfn, int level) | ||
114 | { | ||
115 | return (pfn + level_size(level) - 1) & level_mask(level); | ||
116 | } | ||
74 | 117 | ||
75 | /* VT-d pages must always be _smaller_ than MM pages. Otherwise things | 118 | /* VT-d pages must always be _smaller_ than MM pages. Otherwise things |
76 | are never going to work. */ | 119 | are never going to work. */ |
@@ -236,7 +279,7 @@ static inline u64 dma_pte_addr(struct dma_pte *pte) | |||
236 | return pte->val & VTD_PAGE_MASK; | 279 | return pte->val & VTD_PAGE_MASK; |
237 | #else | 280 | #else |
238 | /* Must have a full atomic 64-bit read */ | 281 | /* Must have a full atomic 64-bit read */ |
239 | return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK; | 282 | return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; |
240 | #endif | 283 | #endif |
241 | } | 284 | } |
242 | 285 | ||
@@ -340,7 +383,7 @@ int dmar_disabled = 0; | |||
340 | int dmar_disabled = 1; | 383 | int dmar_disabled = 1; |
341 | #endif /*CONFIG_DMAR_DEFAULT_ON*/ | 384 | #endif /*CONFIG_DMAR_DEFAULT_ON*/ |
342 | 385 | ||
343 | static int __initdata dmar_map_gfx = 1; | 386 | static int dmar_map_gfx = 1; |
344 | static int dmar_forcedac; | 387 | static int dmar_forcedac; |
345 | static int intel_iommu_strict; | 388 | static int intel_iommu_strict; |
346 | 389 | ||
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova) | |||
434 | } | 477 | } |
435 | 478 | ||
436 | 479 | ||
437 | static inline int width_to_agaw(int width); | ||
438 | |||
439 | static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) | 480 | static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) |
440 | { | 481 | { |
441 | unsigned long sagaw; | 482 | unsigned long sagaw; |
@@ -491,13 +532,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) | |||
491 | 532 | ||
492 | domain->iommu_coherency = 1; | 533 | domain->iommu_coherency = 1; |
493 | 534 | ||
494 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | 535 | for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { |
495 | for (; i < g_num_of_iommus; ) { | ||
496 | if (!ecap_coherent(g_iommus[i]->ecap)) { | 536 | if (!ecap_coherent(g_iommus[i]->ecap)) { |
497 | domain->iommu_coherency = 0; | 537 | domain->iommu_coherency = 0; |
498 | break; | 538 | break; |
499 | } | 539 | } |
500 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); | ||
501 | } | 540 | } |
502 | } | 541 | } |
503 | 542 | ||
@@ -507,13 +546,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain) | |||
507 | 546 | ||
508 | domain->iommu_snooping = 1; | 547 | domain->iommu_snooping = 1; |
509 | 548 | ||
510 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | 549 | for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { |
511 | for (; i < g_num_of_iommus; ) { | ||
512 | if (!ecap_sc_support(g_iommus[i]->ecap)) { | 550 | if (!ecap_sc_support(g_iommus[i]->ecap)) { |
513 | domain->iommu_snooping = 0; | 551 | domain->iommu_snooping = 0; |
514 | break; | 552 | break; |
515 | } | 553 | } |
516 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); | ||
517 | } | 554 | } |
518 | } | 555 | } |
519 | 556 | ||
@@ -650,51 +687,6 @@ out: | |||
650 | spin_unlock_irqrestore(&iommu->lock, flags); | 687 | spin_unlock_irqrestore(&iommu->lock, flags); |
651 | } | 688 | } |
652 | 689 | ||
653 | /* page table handling */ | ||
654 | #define LEVEL_STRIDE (9) | ||
655 | #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) | ||
656 | |||
657 | static inline int agaw_to_level(int agaw) | ||
658 | { | ||
659 | return agaw + 2; | ||
660 | } | ||
661 | |||
662 | static inline int agaw_to_width(int agaw) | ||
663 | { | ||
664 | return 30 + agaw * LEVEL_STRIDE; | ||
665 | |||
666 | } | ||
667 | |||
668 | static inline int width_to_agaw(int width) | ||
669 | { | ||
670 | return (width - 30) / LEVEL_STRIDE; | ||
671 | } | ||
672 | |||
673 | static inline unsigned int level_to_offset_bits(int level) | ||
674 | { | ||
675 | return (level - 1) * LEVEL_STRIDE; | ||
676 | } | ||
677 | |||
678 | static inline int pfn_level_offset(unsigned long pfn, int level) | ||
679 | { | ||
680 | return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; | ||
681 | } | ||
682 | |||
683 | static inline unsigned long level_mask(int level) | ||
684 | { | ||
685 | return -1UL << level_to_offset_bits(level); | ||
686 | } | ||
687 | |||
688 | static inline unsigned long level_size(int level) | ||
689 | { | ||
690 | return 1UL << level_to_offset_bits(level); | ||
691 | } | ||
692 | |||
693 | static inline unsigned long align_to_level(unsigned long pfn, int level) | ||
694 | { | ||
695 | return (pfn + level_size(level) - 1) & level_mask(level); | ||
696 | } | ||
697 | |||
698 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | 690 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
699 | unsigned long pfn) | 691 | unsigned long pfn) |
700 | { | 692 | { |
@@ -1068,7 +1060,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, | |||
1068 | } | 1060 | } |
1069 | 1061 | ||
1070 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | 1062 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
1071 | unsigned long pfn, unsigned int pages) | 1063 | unsigned long pfn, unsigned int pages, int map) |
1072 | { | 1064 | { |
1073 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); | 1065 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); |
1074 | uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; | 1066 | uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; |
@@ -1089,10 +1081,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
1089 | DMA_TLB_PSI_FLUSH); | 1081 | DMA_TLB_PSI_FLUSH); |
1090 | 1082 | ||
1091 | /* | 1083 | /* |
1092 | * In caching mode, domain ID 0 is reserved for non-present to present | 1084 | * In caching mode, changes of pages from non-present to present require |
1093 | * mapping flush. Device IOTLB doesn't need to be flushed in this case. | 1085 | * flush. However, device IOTLB doesn't need to be flushed in this case. |
1094 | */ | 1086 | */ |
1095 | if (!cap_caching_mode(iommu->cap) || did) | 1087 | if (!cap_caching_mode(iommu->cap) || !map) |
1096 | iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); | 1088 | iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); |
1097 | } | 1089 | } |
1098 | 1090 | ||
@@ -1154,7 +1146,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
1154 | unsigned long nlongs; | 1146 | unsigned long nlongs; |
1155 | 1147 | ||
1156 | ndomains = cap_ndoms(iommu->cap); | 1148 | ndomains = cap_ndoms(iommu->cap); |
1157 | pr_debug("Number of Domains supportd <%ld>\n", ndomains); | 1149 | pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id, |
1150 | ndomains); | ||
1158 | nlongs = BITS_TO_LONGS(ndomains); | 1151 | nlongs = BITS_TO_LONGS(ndomains); |
1159 | 1152 | ||
1160 | spin_lock_init(&iommu->lock); | 1153 | spin_lock_init(&iommu->lock); |
@@ -1194,8 +1187,7 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1194 | unsigned long flags; | 1187 | unsigned long flags; |
1195 | 1188 | ||
1196 | if ((iommu->domains) && (iommu->domain_ids)) { | 1189 | if ((iommu->domains) && (iommu->domain_ids)) { |
1197 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); | 1190 | for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { |
1198 | for (; i < cap_ndoms(iommu->cap); ) { | ||
1199 | domain = iommu->domains[i]; | 1191 | domain = iommu->domains[i]; |
1200 | clear_bit(i, iommu->domain_ids); | 1192 | clear_bit(i, iommu->domain_ids); |
1201 | 1193 | ||
@@ -1207,9 +1199,6 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1207 | domain_exit(domain); | 1199 | domain_exit(domain); |
1208 | } | 1200 | } |
1209 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | 1201 | spin_unlock_irqrestore(&domain->iommu_lock, flags); |
1210 | |||
1211 | i = find_next_bit(iommu->domain_ids, | ||
1212 | cap_ndoms(iommu->cap), i+1); | ||
1213 | } | 1202 | } |
1214 | } | 1203 | } |
1215 | 1204 | ||
@@ -1292,14 +1281,11 @@ static void iommu_detach_domain(struct dmar_domain *domain, | |||
1292 | 1281 | ||
1293 | spin_lock_irqsave(&iommu->lock, flags); | 1282 | spin_lock_irqsave(&iommu->lock, flags); |
1294 | ndomains = cap_ndoms(iommu->cap); | 1283 | ndomains = cap_ndoms(iommu->cap); |
1295 | num = find_first_bit(iommu->domain_ids, ndomains); | 1284 | for_each_set_bit(num, iommu->domain_ids, ndomains) { |
1296 | for (; num < ndomains; ) { | ||
1297 | if (iommu->domains[num] == domain) { | 1285 | if (iommu->domains[num] == domain) { |
1298 | found = 1; | 1286 | found = 1; |
1299 | break; | 1287 | break; |
1300 | } | 1288 | } |
1301 | num = find_next_bit(iommu->domain_ids, | ||
1302 | cap_ndoms(iommu->cap), num+1); | ||
1303 | } | 1289 | } |
1304 | 1290 | ||
1305 | if (found) { | 1291 | if (found) { |
@@ -1485,15 +1471,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1485 | 1471 | ||
1486 | /* find an available domain id for this device in iommu */ | 1472 | /* find an available domain id for this device in iommu */ |
1487 | ndomains = cap_ndoms(iommu->cap); | 1473 | ndomains = cap_ndoms(iommu->cap); |
1488 | num = find_first_bit(iommu->domain_ids, ndomains); | 1474 | for_each_set_bit(num, iommu->domain_ids, ndomains) { |
1489 | for (; num < ndomains; ) { | ||
1490 | if (iommu->domains[num] == domain) { | 1475 | if (iommu->domains[num] == domain) { |
1491 | id = num; | 1476 | id = num; |
1492 | found = 1; | 1477 | found = 1; |
1493 | break; | 1478 | break; |
1494 | } | 1479 | } |
1495 | num = find_next_bit(iommu->domain_ids, | ||
1496 | cap_ndoms(iommu->cap), num+1); | ||
1497 | } | 1480 | } |
1498 | 1481 | ||
1499 | if (found == 0) { | 1482 | if (found == 0) { |
@@ -1558,7 +1541,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1558 | (((u16)bus) << 8) | devfn, | 1541 | (((u16)bus) << 8) | devfn, |
1559 | DMA_CCMD_MASK_NOBIT, | 1542 | DMA_CCMD_MASK_NOBIT, |
1560 | DMA_CCMD_DEVICE_INVL); | 1543 | DMA_CCMD_DEVICE_INVL); |
1561 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); | 1544 | iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); |
1562 | } else { | 1545 | } else { |
1563 | iommu_flush_write_buffer(iommu); | 1546 | iommu_flush_write_buffer(iommu); |
1564 | } | 1547 | } |
@@ -1887,14 +1870,15 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1887 | } | 1870 | } |
1888 | } | 1871 | } |
1889 | if (found) { | 1872 | if (found) { |
1873 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
1890 | free_devinfo_mem(info); | 1874 | free_devinfo_mem(info); |
1891 | domain_exit(domain); | 1875 | domain_exit(domain); |
1892 | domain = found; | 1876 | domain = found; |
1893 | } else { | 1877 | } else { |
1894 | list_add(&info->link, &domain->devices); | 1878 | list_add(&info->link, &domain->devices); |
1895 | list_add(&info->global, &device_domain_list); | 1879 | list_add(&info->global, &device_domain_list); |
1880 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
1896 | } | 1881 | } |
1897 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
1898 | } | 1882 | } |
1899 | 1883 | ||
1900 | found_domain: | 1884 | found_domain: |
@@ -2333,14 +2317,16 @@ int __init init_dmars(void) | |||
2333 | */ | 2317 | */ |
2334 | iommu->flush.flush_context = __iommu_flush_context; | 2318 | iommu->flush.flush_context = __iommu_flush_context; |
2335 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; | 2319 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; |
2336 | printk(KERN_INFO "IOMMU 0x%Lx: using Register based " | 2320 | printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " |
2337 | "invalidation\n", | 2321 | "invalidation\n", |
2322 | iommu->seq_id, | ||
2338 | (unsigned long long)drhd->reg_base_addr); | 2323 | (unsigned long long)drhd->reg_base_addr); |
2339 | } else { | 2324 | } else { |
2340 | iommu->flush.flush_context = qi_flush_context; | 2325 | iommu->flush.flush_context = qi_flush_context; |
2341 | iommu->flush.flush_iotlb = qi_flush_iotlb; | 2326 | iommu->flush.flush_iotlb = qi_flush_iotlb; |
2342 | printk(KERN_INFO "IOMMU 0x%Lx: using Queued " | 2327 | printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " |
2343 | "invalidation\n", | 2328 | "invalidation\n", |
2329 | iommu->seq_id, | ||
2344 | (unsigned long long)drhd->reg_base_addr); | 2330 | (unsigned long long)drhd->reg_base_addr); |
2345 | } | 2331 | } |
2346 | } | 2332 | } |
@@ -2621,7 +2607,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2621 | 2607 | ||
2622 | /* it's a non-present to present mapping. Only flush if caching mode */ | 2608 | /* it's a non-present to present mapping. Only flush if caching mode */ |
2623 | if (cap_caching_mode(iommu->cap)) | 2609 | if (cap_caching_mode(iommu->cap)) |
2624 | iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size); | 2610 | iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); |
2625 | else | 2611 | else |
2626 | iommu_flush_write_buffer(iommu); | 2612 | iommu_flush_write_buffer(iommu); |
2627 | 2613 | ||
@@ -2661,15 +2647,24 @@ static void flush_unmaps(void) | |||
2661 | if (!deferred_flush[i].next) | 2647 | if (!deferred_flush[i].next) |
2662 | continue; | 2648 | continue; |
2663 | 2649 | ||
2664 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, | 2650 | /* In caching mode, global flushes turn emulation expensive */ |
2651 | if (!cap_caching_mode(iommu->cap)) | ||
2652 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, | ||
2665 | DMA_TLB_GLOBAL_FLUSH); | 2653 | DMA_TLB_GLOBAL_FLUSH); |
2666 | for (j = 0; j < deferred_flush[i].next; j++) { | 2654 | for (j = 0; j < deferred_flush[i].next; j++) { |
2667 | unsigned long mask; | 2655 | unsigned long mask; |
2668 | struct iova *iova = deferred_flush[i].iova[j]; | 2656 | struct iova *iova = deferred_flush[i].iova[j]; |
2669 | 2657 | struct dmar_domain *domain = deferred_flush[i].domain[j]; | |
2670 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); | 2658 | |
2671 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], | 2659 | /* On real hardware multiple invalidations are expensive */ |
2672 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); | 2660 | if (cap_caching_mode(iommu->cap)) |
2661 | iommu_flush_iotlb_psi(iommu, domain->id, | ||
2662 | iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0); | ||
2663 | else { | ||
2664 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); | ||
2665 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], | ||
2666 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); | ||
2667 | } | ||
2673 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); | 2668 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); |
2674 | } | 2669 | } |
2675 | deferred_flush[i].next = 0; | 2670 | deferred_flush[i].next = 0; |
@@ -2750,7 +2745,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2750 | 2745 | ||
2751 | if (intel_iommu_strict) { | 2746 | if (intel_iommu_strict) { |
2752 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | 2747 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
2753 | last_pfn - start_pfn + 1); | 2748 | last_pfn - start_pfn + 1, 0); |
2754 | /* free iova */ | 2749 | /* free iova */ |
2755 | __free_iova(&domain->iovad, iova); | 2750 | __free_iova(&domain->iovad, iova); |
2756 | } else { | 2751 | } else { |
@@ -2840,7 +2835,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2840 | 2835 | ||
2841 | if (intel_iommu_strict) { | 2836 | if (intel_iommu_strict) { |
2842 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | 2837 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
2843 | last_pfn - start_pfn + 1); | 2838 | last_pfn - start_pfn + 1, 0); |
2844 | /* free iova */ | 2839 | /* free iova */ |
2845 | __free_iova(&domain->iovad, iova); | 2840 | __free_iova(&domain->iovad, iova); |
2846 | } else { | 2841 | } else { |
@@ -2874,7 +2869,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2874 | struct dmar_domain *domain; | 2869 | struct dmar_domain *domain; |
2875 | size_t size = 0; | 2870 | size_t size = 0; |
2876 | int prot = 0; | 2871 | int prot = 0; |
2877 | size_t offset_pfn = 0; | ||
2878 | struct iova *iova = NULL; | 2872 | struct iova *iova = NULL; |
2879 | int ret; | 2873 | int ret; |
2880 | struct scatterlist *sg; | 2874 | struct scatterlist *sg; |
@@ -2928,7 +2922,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2928 | 2922 | ||
2929 | /* it's a non-present to present mapping. Only flush if caching mode */ | 2923 | /* it's a non-present to present mapping. Only flush if caching mode */ |
2930 | if (cap_caching_mode(iommu->cap)) | 2924 | if (cap_caching_mode(iommu->cap)) |
2931 | iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn); | 2925 | iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); |
2932 | else | 2926 | else |
2933 | iommu_flush_write_buffer(iommu); | 2927 | iommu_flush_write_buffer(iommu); |
2934 | 2928 | ||
@@ -3032,6 +3026,34 @@ static void __init iommu_exit_mempool(void) | |||
3032 | 3026 | ||
3033 | } | 3027 | } |
3034 | 3028 | ||
3029 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) | ||
3030 | { | ||
3031 | struct dmar_drhd_unit *drhd; | ||
3032 | u32 vtbar; | ||
3033 | int rc; | ||
3034 | |||
3035 | /* We know that this device on this chipset has its own IOMMU. | ||
3036 | * If we find it under a different IOMMU, then the BIOS is lying | ||
3037 | * to us. Hope that the IOMMU for this device is actually | ||
3038 | * disabled, and it needs no translation... | ||
3039 | */ | ||
3040 | rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); | ||
3041 | if (rc) { | ||
3042 | /* "can't" happen */ | ||
3043 | dev_info(&pdev->dev, "failed to run vt-d quirk\n"); | ||
3044 | return; | ||
3045 | } | ||
3046 | vtbar &= 0xffff0000; | ||
3047 | |||
3048 | /* we know that the this iommu should be at offset 0xa000 from vtbar */ | ||
3049 | drhd = dmar_find_matched_drhd_unit(pdev); | ||
3050 | if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, | ||
3051 | TAINT_FIRMWARE_WORKAROUND, | ||
3052 | "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) | ||
3053 | pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | ||
3054 | } | ||
3055 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); | ||
3056 | |||
3035 | static void __init init_no_remapping_devices(void) | 3057 | static void __init init_no_remapping_devices(void) |
3036 | { | 3058 | { |
3037 | struct dmar_drhd_unit *drhd; | 3059 | struct dmar_drhd_unit *drhd; |
@@ -3436,22 +3458,6 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | |||
3436 | /* domain id for virtual machine, it won't be set in context */ | 3458 | /* domain id for virtual machine, it won't be set in context */ |
3437 | static unsigned long vm_domid; | 3459 | static unsigned long vm_domid; |
3438 | 3460 | ||
3439 | static int vm_domain_min_agaw(struct dmar_domain *domain) | ||
3440 | { | ||
3441 | int i; | ||
3442 | int min_agaw = domain->agaw; | ||
3443 | |||
3444 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | ||
3445 | for (; i < g_num_of_iommus; ) { | ||
3446 | if (min_agaw > g_iommus[i]->agaw) | ||
3447 | min_agaw = g_iommus[i]->agaw; | ||
3448 | |||
3449 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); | ||
3450 | } | ||
3451 | |||
3452 | return min_agaw; | ||
3453 | } | ||
3454 | |||
3455 | static struct dmar_domain *iommu_alloc_vm_domain(void) | 3461 | static struct dmar_domain *iommu_alloc_vm_domain(void) |
3456 | { | 3462 | { |
3457 | struct dmar_domain *domain; | 3463 | struct dmar_domain *domain; |
@@ -3512,8 +3518,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain) | |||
3512 | iommu = drhd->iommu; | 3518 | iommu = drhd->iommu; |
3513 | 3519 | ||
3514 | ndomains = cap_ndoms(iommu->cap); | 3520 | ndomains = cap_ndoms(iommu->cap); |
3515 | i = find_first_bit(iommu->domain_ids, ndomains); | 3521 | for_each_set_bit(i, iommu->domain_ids, ndomains) { |
3516 | for (; i < ndomains; ) { | ||
3517 | if (iommu->domains[i] == domain) { | 3522 | if (iommu->domains[i] == domain) { |
3518 | spin_lock_irqsave(&iommu->lock, flags); | 3523 | spin_lock_irqsave(&iommu->lock, flags); |
3519 | clear_bit(i, iommu->domain_ids); | 3524 | clear_bit(i, iommu->domain_ids); |
@@ -3521,7 +3526,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain) | |||
3521 | spin_unlock_irqrestore(&iommu->lock, flags); | 3526 | spin_unlock_irqrestore(&iommu->lock, flags); |
3522 | break; | 3527 | break; |
3523 | } | 3528 | } |
3524 | i = find_next_bit(iommu->domain_ids, ndomains, i+1); | ||
3525 | } | 3529 | } |
3526 | } | 3530 | } |
3527 | } | 3531 | } |
@@ -3582,7 +3586,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
3582 | struct pci_dev *pdev = to_pci_dev(dev); | 3586 | struct pci_dev *pdev = to_pci_dev(dev); |
3583 | struct intel_iommu *iommu; | 3587 | struct intel_iommu *iommu; |
3584 | int addr_width; | 3588 | int addr_width; |
3585 | u64 end; | ||
3586 | 3589 | ||
3587 | /* normally pdev is not mapped */ | 3590 | /* normally pdev is not mapped */ |
3588 | if (unlikely(domain_context_mapped(pdev))) { | 3591 | if (unlikely(domain_context_mapped(pdev))) { |
@@ -3605,14 +3608,31 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
3605 | 3608 | ||
3606 | /* check if this iommu agaw is sufficient for max mapped address */ | 3609 | /* check if this iommu agaw is sufficient for max mapped address */ |
3607 | addr_width = agaw_to_width(iommu->agaw); | 3610 | addr_width = agaw_to_width(iommu->agaw); |
3608 | end = DOMAIN_MAX_ADDR(addr_width); | 3611 | if (addr_width > cap_mgaw(iommu->cap)) |
3609 | end = end & VTD_PAGE_MASK; | 3612 | addr_width = cap_mgaw(iommu->cap); |
3610 | if (end < dmar_domain->max_addr) { | 3613 | |
3611 | printk(KERN_ERR "%s: iommu agaw (%d) is not " | 3614 | if (dmar_domain->max_addr > (1LL << addr_width)) { |
3615 | printk(KERN_ERR "%s: iommu width (%d) is not " | ||
3612 | "sufficient for the mapped address (%llx)\n", | 3616 | "sufficient for the mapped address (%llx)\n", |
3613 | __func__, iommu->agaw, dmar_domain->max_addr); | 3617 | __func__, addr_width, dmar_domain->max_addr); |
3614 | return -EFAULT; | 3618 | return -EFAULT; |
3615 | } | 3619 | } |
3620 | dmar_domain->gaw = addr_width; | ||
3621 | |||
3622 | /* | ||
3623 | * Knock out extra levels of page tables if necessary | ||
3624 | */ | ||
3625 | while (iommu->agaw < dmar_domain->agaw) { | ||
3626 | struct dma_pte *pte; | ||
3627 | |||
3628 | pte = dmar_domain->pgd; | ||
3629 | if (dma_pte_present(pte)) { | ||
3630 | free_pgtable_page(dmar_domain->pgd); | ||
3631 | dmar_domain->pgd = (struct dma_pte *) | ||
3632 | phys_to_virt(dma_pte_addr(pte)); | ||
3633 | } | ||
3634 | dmar_domain->agaw--; | ||
3635 | } | ||
3616 | 3636 | ||
3617 | return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); | 3637 | return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); |
3618 | } | 3638 | } |
@@ -3626,14 +3646,14 @@ static void intel_iommu_detach_device(struct iommu_domain *domain, | |||
3626 | domain_remove_one_dev_info(dmar_domain, pdev); | 3646 | domain_remove_one_dev_info(dmar_domain, pdev); |
3627 | } | 3647 | } |
3628 | 3648 | ||
3629 | static int intel_iommu_map_range(struct iommu_domain *domain, | 3649 | static int intel_iommu_map(struct iommu_domain *domain, |
3630 | unsigned long iova, phys_addr_t hpa, | 3650 | unsigned long iova, phys_addr_t hpa, |
3631 | size_t size, int iommu_prot) | 3651 | int gfp_order, int iommu_prot) |
3632 | { | 3652 | { |
3633 | struct dmar_domain *dmar_domain = domain->priv; | 3653 | struct dmar_domain *dmar_domain = domain->priv; |
3634 | u64 max_addr; | 3654 | u64 max_addr; |
3635 | int addr_width; | ||
3636 | int prot = 0; | 3655 | int prot = 0; |
3656 | size_t size; | ||
3637 | int ret; | 3657 | int ret; |
3638 | 3658 | ||
3639 | if (iommu_prot & IOMMU_READ) | 3659 | if (iommu_prot & IOMMU_READ) |
@@ -3643,20 +3663,17 @@ static int intel_iommu_map_range(struct iommu_domain *domain, | |||
3643 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) | 3663 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) |
3644 | prot |= DMA_PTE_SNP; | 3664 | prot |= DMA_PTE_SNP; |
3645 | 3665 | ||
3666 | size = PAGE_SIZE << gfp_order; | ||
3646 | max_addr = iova + size; | 3667 | max_addr = iova + size; |
3647 | if (dmar_domain->max_addr < max_addr) { | 3668 | if (dmar_domain->max_addr < max_addr) { |
3648 | int min_agaw; | ||
3649 | u64 end; | 3669 | u64 end; |
3650 | 3670 | ||
3651 | /* check if minimum agaw is sufficient for mapped address */ | 3671 | /* check if minimum agaw is sufficient for mapped address */ |
3652 | min_agaw = vm_domain_min_agaw(dmar_domain); | 3672 | end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; |
3653 | addr_width = agaw_to_width(min_agaw); | ||
3654 | end = DOMAIN_MAX_ADDR(addr_width); | ||
3655 | end = end & VTD_PAGE_MASK; | ||
3656 | if (end < max_addr) { | 3673 | if (end < max_addr) { |
3657 | printk(KERN_ERR "%s: iommu agaw (%d) is not " | 3674 | printk(KERN_ERR "%s: iommu width (%d) is not " |
3658 | "sufficient for the mapped address (%llx)\n", | 3675 | "sufficient for the mapped address (%llx)\n", |
3659 | __func__, min_agaw, max_addr); | 3676 | __func__, dmar_domain->gaw, max_addr); |
3660 | return -EFAULT; | 3677 | return -EFAULT; |
3661 | } | 3678 | } |
3662 | dmar_domain->max_addr = max_addr; | 3679 | dmar_domain->max_addr = max_addr; |
@@ -3669,19 +3686,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain, | |||
3669 | return ret; | 3686 | return ret; |
3670 | } | 3687 | } |
3671 | 3688 | ||
3672 | static void intel_iommu_unmap_range(struct iommu_domain *domain, | 3689 | static int intel_iommu_unmap(struct iommu_domain *domain, |
3673 | unsigned long iova, size_t size) | 3690 | unsigned long iova, int gfp_order) |
3674 | { | 3691 | { |
3675 | struct dmar_domain *dmar_domain = domain->priv; | 3692 | struct dmar_domain *dmar_domain = domain->priv; |
3676 | 3693 | size_t size = PAGE_SIZE << gfp_order; | |
3677 | if (!size) | ||
3678 | return; | ||
3679 | 3694 | ||
3680 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 3695 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
3681 | (iova + size - 1) >> VTD_PAGE_SHIFT); | 3696 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
3682 | 3697 | ||
3683 | if (dmar_domain->max_addr == iova + size) | 3698 | if (dmar_domain->max_addr == iova + size) |
3684 | dmar_domain->max_addr = iova; | 3699 | dmar_domain->max_addr = iova; |
3700 | |||
3701 | return gfp_order; | ||
3685 | } | 3702 | } |
3686 | 3703 | ||
3687 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 3704 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
@@ -3705,6 +3722,8 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain, | |||
3705 | 3722 | ||
3706 | if (cap == IOMMU_CAP_CACHE_COHERENCY) | 3723 | if (cap == IOMMU_CAP_CACHE_COHERENCY) |
3707 | return dmar_domain->iommu_snooping; | 3724 | return dmar_domain->iommu_snooping; |
3725 | if (cap == IOMMU_CAP_INTR_REMAP) | ||
3726 | return intr_remapping_enabled; | ||
3708 | 3727 | ||
3709 | return 0; | 3728 | return 0; |
3710 | } | 3729 | } |
@@ -3714,8 +3733,8 @@ static struct iommu_ops intel_iommu_ops = { | |||
3714 | .domain_destroy = intel_iommu_domain_destroy, | 3733 | .domain_destroy = intel_iommu_domain_destroy, |
3715 | .attach_dev = intel_iommu_attach_device, | 3734 | .attach_dev = intel_iommu_attach_device, |
3716 | .detach_dev = intel_iommu_detach_device, | 3735 | .detach_dev = intel_iommu_detach_device, |
3717 | .map = intel_iommu_map_range, | 3736 | .map = intel_iommu_map, |
3718 | .unmap = intel_iommu_unmap_range, | 3737 | .unmap = intel_iommu_unmap, |
3719 | .iova_to_phys = intel_iommu_iova_to_phys, | 3738 | .iova_to_phys = intel_iommu_iova_to_phys, |
3720 | .domain_has_cap = intel_iommu_domain_has_cap, | 3739 | .domain_has_cap = intel_iommu_domain_has_cap, |
3721 | }; | 3740 | }; |
@@ -3728,10 +3747,43 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) | |||
3728 | */ | 3747 | */ |
3729 | printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); | 3748 | printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); |
3730 | rwbf_quirk = 1; | 3749 | rwbf_quirk = 1; |
3750 | |||
3751 | /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ | ||
3752 | if (dev->revision == 0x07) { | ||
3753 | printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); | ||
3754 | dmar_map_gfx = 0; | ||
3755 | } | ||
3731 | } | 3756 | } |
3732 | 3757 | ||
3733 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); | 3758 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); |
3734 | 3759 | ||
3760 | #define GGC 0x52 | ||
3761 | #define GGC_MEMORY_SIZE_MASK (0xf << 8) | ||
3762 | #define GGC_MEMORY_SIZE_NONE (0x0 << 8) | ||
3763 | #define GGC_MEMORY_SIZE_1M (0x1 << 8) | ||
3764 | #define GGC_MEMORY_SIZE_2M (0x3 << 8) | ||
3765 | #define GGC_MEMORY_VT_ENABLED (0x8 << 8) | ||
3766 | #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) | ||
3767 | #define GGC_MEMORY_SIZE_3M_VT (0xa << 8) | ||
3768 | #define GGC_MEMORY_SIZE_4M_VT (0xb << 8) | ||
3769 | |||
3770 | static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) | ||
3771 | { | ||
3772 | unsigned short ggc; | ||
3773 | |||
3774 | if (pci_read_config_word(dev, GGC, &ggc)) | ||
3775 | return; | ||
3776 | |||
3777 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { | ||
3778 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); | ||
3779 | dmar_map_gfx = 0; | ||
3780 | } | ||
3781 | } | ||
3782 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); | ||
3783 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); | ||
3784 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); | ||
3785 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); | ||
3786 | |||
3735 | /* On Tylersburg chipsets, some BIOSes have been known to enable the | 3787 | /* On Tylersburg chipsets, some BIOSes have been known to enable the |
3736 | ISOCH DMAR unit for the Azalia sound device, but not give it any | 3788 | ISOCH DMAR unit for the Azalia sound device, but not give it any |
3737 | TLB entries, which causes it to deadlock. Check for that. We do | 3789 | TLB entries, which causes it to deadlock. Check for that. We do |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 95b849130ad4..ec87cd66f3eb 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/interrupt.h> | 1 | #include <linux/interrupt.h> |
2 | #include <linux/dmar.h> | 2 | #include <linux/dmar.h> |
3 | #include <linux/spinlock.h> | 3 | #include <linux/spinlock.h> |
4 | #include <linux/slab.h> | ||
4 | #include <linux/jiffies.h> | 5 | #include <linux/jiffies.h> |
5 | #include <linux/hpet.h> | 6 | #include <linux/hpet.h> |
6 | #include <linux/pci.h> | 7 | #include <linux/pci.h> |
@@ -20,6 +21,8 @@ static int ir_ioapic_num, ir_hpet_num; | |||
20 | int intr_remapping_enabled; | 21 | int intr_remapping_enabled; |
21 | 22 | ||
22 | static int disable_intremap; | 23 | static int disable_intremap; |
24 | static int disable_sourceid_checking; | ||
25 | |||
23 | static __init int setup_nointremap(char *str) | 26 | static __init int setup_nointremap(char *str) |
24 | { | 27 | { |
25 | disable_intremap = 1; | 28 | disable_intremap = 1; |
@@ -27,109 +30,40 @@ static __init int setup_nointremap(char *str) | |||
27 | } | 30 | } |
28 | early_param("nointremap", setup_nointremap); | 31 | early_param("nointremap", setup_nointremap); |
29 | 32 | ||
30 | struct irq_2_iommu { | 33 | static __init int setup_intremap(char *str) |
31 | struct intel_iommu *iommu; | ||
32 | u16 irte_index; | ||
33 | u16 sub_handle; | ||
34 | u8 irte_mask; | ||
35 | }; | ||
36 | |||
37 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
38 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) | ||
39 | { | ||
40 | struct irq_2_iommu *iommu; | ||
41 | |||
42 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | ||
43 | printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); | ||
44 | |||
45 | return iommu; | ||
46 | } | ||
47 | |||
48 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
49 | { | ||
50 | struct irq_desc *desc; | ||
51 | |||
52 | desc = irq_to_desc(irq); | ||
53 | |||
54 | if (WARN_ON_ONCE(!desc)) | ||
55 | return NULL; | ||
56 | |||
57 | return desc->irq_2_iommu; | ||
58 | } | ||
59 | |||
60 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
61 | { | 34 | { |
62 | struct irq_desc *desc; | 35 | if (!str) |
63 | struct irq_2_iommu *irq_iommu; | 36 | return -EINVAL; |
64 | |||
65 | desc = irq_to_desc(irq); | ||
66 | if (!desc) { | ||
67 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
68 | return NULL; | ||
69 | } | ||
70 | 37 | ||
71 | irq_iommu = desc->irq_2_iommu; | 38 | if (!strncmp(str, "on", 2)) |
72 | 39 | disable_intremap = 0; | |
73 | if (!irq_iommu) | 40 | else if (!strncmp(str, "off", 3)) |
74 | desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); | 41 | disable_intremap = 1; |
75 | 42 | else if (!strncmp(str, "nosid", 5)) | |
76 | return desc->irq_2_iommu; | 43 | disable_sourceid_checking = 1; |
77 | } | ||
78 | 44 | ||
79 | #else /* !CONFIG_SPARSE_IRQ */ | 45 | return 0; |
80 | |||
81 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | ||
82 | |||
83 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
84 | { | ||
85 | if (irq < nr_irqs) | ||
86 | return &irq_2_iommuX[irq]; | ||
87 | |||
88 | return NULL; | ||
89 | } | ||
90 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
91 | { | ||
92 | return irq_2_iommu(irq); | ||
93 | } | 46 | } |
94 | #endif | 47 | early_param("intremap", setup_intremap); |
95 | 48 | ||
96 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 49 | static DEFINE_SPINLOCK(irq_2_ir_lock); |
97 | 50 | ||
98 | static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) | 51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
99 | { | ||
100 | struct irq_2_iommu *irq_iommu; | ||
101 | |||
102 | irq_iommu = irq_2_iommu(irq); | ||
103 | |||
104 | if (!irq_iommu) | ||
105 | return NULL; | ||
106 | |||
107 | if (!irq_iommu->iommu) | ||
108 | return NULL; | ||
109 | |||
110 | return irq_iommu; | ||
111 | } | ||
112 | |||
113 | int irq_remapped(int irq) | ||
114 | { | 52 | { |
115 | return valid_irq_2_iommu(irq) != NULL; | 53 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
54 | return cfg ? &cfg->irq_2_iommu : NULL; | ||
116 | } | 55 | } |
117 | 56 | ||
118 | int get_irte(int irq, struct irte *entry) | 57 | int get_irte(int irq, struct irte *entry) |
119 | { | 58 | { |
120 | int index; | 59 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
121 | struct irq_2_iommu *irq_iommu; | ||
122 | unsigned long flags; | 60 | unsigned long flags; |
61 | int index; | ||
123 | 62 | ||
124 | if (!entry) | 63 | if (!entry || !irq_iommu) |
125 | return -1; | 64 | return -1; |
126 | 65 | ||
127 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 66 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
128 | irq_iommu = valid_irq_2_iommu(irq); | ||
129 | if (!irq_iommu) { | ||
130 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
131 | return -1; | ||
132 | } | ||
133 | 67 | ||
134 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 68 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
135 | *entry = *(irq_iommu->iommu->ir_table->base + index); | 69 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
@@ -141,20 +75,14 @@ int get_irte(int irq, struct irte *entry) | |||
141 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | 75 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) |
142 | { | 76 | { |
143 | struct ir_table *table = iommu->ir_table; | 77 | struct ir_table *table = iommu->ir_table; |
144 | struct irq_2_iommu *irq_iommu; | 78 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
145 | u16 index, start_index; | 79 | u16 index, start_index; |
146 | unsigned int mask = 0; | 80 | unsigned int mask = 0; |
147 | unsigned long flags; | 81 | unsigned long flags; |
148 | int i; | 82 | int i; |
149 | 83 | ||
150 | if (!count) | 84 | if (!count || !irq_iommu) |
151 | return -1; | ||
152 | |||
153 | #ifndef CONFIG_SPARSE_IRQ | ||
154 | /* protect irq_2_iommu_alloc later */ | ||
155 | if (irq >= nr_irqs) | ||
156 | return -1; | 85 | return -1; |
157 | #endif | ||
158 | 86 | ||
159 | /* | 87 | /* |
160 | * start the IRTE search from index 0. | 88 | * start the IRTE search from index 0. |
@@ -195,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
195 | for (i = index; i < index + count; i++) | 123 | for (i = index; i < index + count; i++) |
196 | table->base[i].present = 1; | 124 | table->base[i].present = 1; |
197 | 125 | ||
198 | irq_iommu = irq_2_iommu_alloc(irq); | ||
199 | if (!irq_iommu) { | ||
200 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
201 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
202 | return -1; | ||
203 | } | ||
204 | |||
205 | irq_iommu->iommu = iommu; | 126 | irq_iommu->iommu = iommu; |
206 | irq_iommu->irte_index = index; | 127 | irq_iommu->irte_index = index; |
207 | irq_iommu->sub_handle = 0; | 128 | irq_iommu->sub_handle = 0; |
@@ -225,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) | |||
225 | 146 | ||
226 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) | 147 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) |
227 | { | 148 | { |
228 | int index; | 149 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
229 | struct irq_2_iommu *irq_iommu; | ||
230 | unsigned long flags; | 150 | unsigned long flags; |
151 | int index; | ||
231 | 152 | ||
232 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 153 | if (!irq_iommu) |
233 | irq_iommu = valid_irq_2_iommu(irq); | ||
234 | if (!irq_iommu) { | ||
235 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
236 | return -1; | 154 | return -1; |
237 | } | ||
238 | 155 | ||
156 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
239 | *sub_handle = irq_iommu->sub_handle; | 157 | *sub_handle = irq_iommu->sub_handle; |
240 | index = irq_iommu->irte_index; | 158 | index = irq_iommu->irte_index; |
241 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 159 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
@@ -244,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |||
244 | 162 | ||
245 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | 163 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
246 | { | 164 | { |
247 | struct irq_2_iommu *irq_iommu; | 165 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
248 | unsigned long flags; | 166 | unsigned long flags; |
249 | 167 | ||
250 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 168 | if (!irq_iommu) |
251 | |||
252 | irq_iommu = irq_2_iommu_alloc(irq); | ||
253 | |||
254 | if (!irq_iommu) { | ||
255 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
256 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
257 | return -1; | 169 | return -1; |
258 | } | 170 | |
171 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
259 | 172 | ||
260 | irq_iommu->iommu = iommu; | 173 | irq_iommu->iommu = iommu; |
261 | irq_iommu->irte_index = index; | 174 | irq_iommu->irte_index = index; |
@@ -267,51 +180,26 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
267 | return 0; | 180 | return 0; |
268 | } | 181 | } |
269 | 182 | ||
270 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | ||
271 | { | ||
272 | struct irq_2_iommu *irq_iommu; | ||
273 | unsigned long flags; | ||
274 | |||
275 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
276 | irq_iommu = valid_irq_2_iommu(irq); | ||
277 | if (!irq_iommu) { | ||
278 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
279 | return -1; | ||
280 | } | ||
281 | |||
282 | irq_iommu->iommu = NULL; | ||
283 | irq_iommu->irte_index = 0; | ||
284 | irq_iommu->sub_handle = 0; | ||
285 | irq_2_iommu(irq)->irte_mask = 0; | ||
286 | |||
287 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
288 | |||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | int modify_irte(int irq, struct irte *irte_modified) | 183 | int modify_irte(int irq, struct irte *irte_modified) |
293 | { | 184 | { |
294 | int rc; | 185 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
295 | int index; | ||
296 | struct irte *irte; | ||
297 | struct intel_iommu *iommu; | 186 | struct intel_iommu *iommu; |
298 | struct irq_2_iommu *irq_iommu; | ||
299 | unsigned long flags; | 187 | unsigned long flags; |
188 | struct irte *irte; | ||
189 | int rc, index; | ||
300 | 190 | ||
301 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 191 | if (!irq_iommu) |
302 | irq_iommu = valid_irq_2_iommu(irq); | ||
303 | if (!irq_iommu) { | ||
304 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
305 | return -1; | 192 | return -1; |
306 | } | 193 | |
194 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
307 | 195 | ||
308 | iommu = irq_iommu->iommu; | 196 | iommu = irq_iommu->iommu; |
309 | 197 | ||
310 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 198 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
311 | irte = &iommu->ir_table->base[index]; | 199 | irte = &iommu->ir_table->base[index]; |
312 | 200 | ||
313 | set_64bit((unsigned long *)&irte->low, irte_modified->low); | 201 | set_64bit(&irte->low, irte_modified->low); |
314 | set_64bit((unsigned long *)&irte->high, irte_modified->high); | 202 | set_64bit(&irte->high, irte_modified->high); |
315 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | 203 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
316 | 204 | ||
317 | rc = qi_flush_iec(iommu, index, 0); | 205 | rc = qi_flush_iec(iommu, index, 0); |
@@ -320,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
320 | return rc; | 208 | return rc; |
321 | } | 209 | } |
322 | 210 | ||
323 | int flush_irte(int irq) | ||
324 | { | ||
325 | int rc; | ||
326 | int index; | ||
327 | struct intel_iommu *iommu; | ||
328 | struct irq_2_iommu *irq_iommu; | ||
329 | unsigned long flags; | ||
330 | |||
331 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
332 | irq_iommu = valid_irq_2_iommu(irq); | ||
333 | if (!irq_iommu) { | ||
334 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
335 | return -1; | ||
336 | } | ||
337 | |||
338 | iommu = irq_iommu->iommu; | ||
339 | |||
340 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
341 | |||
342 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
343 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
344 | |||
345 | return rc; | ||
346 | } | ||
347 | |||
348 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) | 211 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) |
349 | { | 212 | { |
350 | int i; | 213 | int i; |
@@ -392,8 +255,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) | |||
392 | end = start + (1 << irq_iommu->irte_mask); | 255 | end = start + (1 << irq_iommu->irte_mask); |
393 | 256 | ||
394 | for (entry = start; entry < end; entry++) { | 257 | for (entry = start; entry < end; entry++) { |
395 | set_64bit((unsigned long *)&entry->low, 0); | 258 | set_64bit(&entry->low, 0); |
396 | set_64bit((unsigned long *)&entry->high, 0); | 259 | set_64bit(&entry->high, 0); |
397 | } | 260 | } |
398 | 261 | ||
399 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); | 262 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
@@ -401,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) | |||
401 | 264 | ||
402 | int free_irte(int irq) | 265 | int free_irte(int irq) |
403 | { | 266 | { |
404 | int rc = 0; | 267 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
405 | struct irq_2_iommu *irq_iommu; | ||
406 | unsigned long flags; | 268 | unsigned long flags; |
269 | int rc; | ||
407 | 270 | ||
408 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 271 | if (!irq_iommu) |
409 | irq_iommu = valid_irq_2_iommu(irq); | ||
410 | if (!irq_iommu) { | ||
411 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
412 | return -1; | 272 | return -1; |
413 | } | 273 | |
274 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
414 | 275 | ||
415 | rc = clear_entries(irq_iommu); | 276 | rc = clear_entries(irq_iommu); |
416 | 277 | ||
@@ -452,6 +313,8 @@ int free_irte(int irq) | |||
452 | static void set_irte_sid(struct irte *irte, unsigned int svt, | 313 | static void set_irte_sid(struct irte *irte, unsigned int svt, |
453 | unsigned int sq, unsigned int sid) | 314 | unsigned int sq, unsigned int sid) |
454 | { | 315 | { |
316 | if (disable_sourceid_checking) | ||
317 | svt = SVT_NO_VERIFY; | ||
455 | irte->svt = svt; | 318 | irte->svt = svt; |
456 | irte->sq = sq; | 319 | irte->sq = sq; |
457 | irte->sid = sid; | 320 | irte->sid = sid; |
@@ -831,9 +694,9 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, | |||
831 | return -1; | 694 | return -1; |
832 | } | 695 | } |
833 | 696 | ||
834 | printk(KERN_INFO "IOAPIC id %d under DRHD base" | 697 | printk(KERN_INFO "IOAPIC id %d under DRHD base " |
835 | " 0x%Lx\n", scope->enumeration_id, | 698 | " 0x%Lx IOMMU %d\n", scope->enumeration_id, |
836 | drhd->address); | 699 | drhd->address, iommu->seq_id); |
837 | 700 | ||
838 | ir_parse_one_ioapic_scope(scope, iommu); | 701 | ir_parse_one_ioapic_scope(scope, iommu); |
839 | } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { | 702 | } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { |
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 3e0d7b5dd1b9..203508b227b7 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/acpi.h> | 20 | #include <linux/acpi.h> |
21 | #include <linux/slab.h> | ||
21 | #include <acpi/acpi_bus.h> | 22 | #include <acpi/acpi_bus.h> |
22 | 23 | ||
23 | struct ioapic { | 24 | struct ioapic { |
@@ -31,9 +32,9 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
31 | acpi_status status; | 32 | acpi_status status; |
32 | unsigned long long gsb; | 33 | unsigned long long gsb; |
33 | struct ioapic *ioapic; | 34 | struct ioapic *ioapic; |
34 | u64 addr; | ||
35 | int ret; | 35 | int ret; |
36 | char *type; | 36 | char *type; |
37 | struct resource *res; | ||
37 | 38 | ||
38 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 39 | handle = DEVICE_ACPI_HANDLE(&dev->dev); |
39 | if (!handle) | 40 | if (!handle) |
@@ -69,13 +70,12 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
69 | if (pci_request_region(dev, 0, type)) | 70 | if (pci_request_region(dev, 0, type)) |
70 | goto exit_disable; | 71 | goto exit_disable; |
71 | 72 | ||
72 | addr = pci_resource_start(dev, 0); | 73 | res = &dev->resource[0]; |
73 | if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base)) | 74 | if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base)) |
74 | goto exit_release; | 75 | goto exit_release; |
75 | 76 | ||
76 | pci_set_drvdata(dev, ioapic); | 77 | pci_set_drvdata(dev, ioapic); |
77 | dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr, | 78 | dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base); |
78 | ioapic->gsi_base); | ||
79 | return 0; | 79 | return 0; |
80 | 80 | ||
81 | exit_release: | 81 | exit_release: |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index b2a448e19fe6..553d8ee55c1c 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
13 | #include <linux/string.h> | 14 | #include <linux/string.h> |
14 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
@@ -607,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno, | |||
607 | * the VF BAR size multiplied by the number of VFs. The alignment | 608 | * the VF BAR size multiplied by the number of VFs. The alignment |
608 | * is just the VF BAR size. | 609 | * is just the VF BAR size. |
609 | */ | 610 | */ |
610 | int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) | 611 | resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) |
611 | { | 612 | { |
612 | struct resource tmp; | 613 | struct resource tmp; |
613 | enum pci_bar_type type; | 614 | enum pci_bar_type type; |
@@ -706,6 +707,21 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev) | |||
706 | } | 707 | } |
707 | EXPORT_SYMBOL_GPL(pci_sriov_migration); | 708 | EXPORT_SYMBOL_GPL(pci_sriov_migration); |
708 | 709 | ||
710 | /** | ||
711 | * pci_num_vf - return number of VFs associated with a PF device_release_driver | ||
712 | * @dev: the PCI device | ||
713 | * | ||
714 | * Returns number of VFs, or 0 if SR-IOV is not enabled. | ||
715 | */ | ||
716 | int pci_num_vf(struct pci_dev *dev) | ||
717 | { | ||
718 | if (!dev || !dev->is_physfn) | ||
719 | return 0; | ||
720 | else | ||
721 | return dev->sriov->nr_virtfn; | ||
722 | } | ||
723 | EXPORT_SYMBOL_GPL(pci_num_vf); | ||
724 | |||
709 | static int ats_alloc_one(struct pci_dev *dev, int ps) | 725 | static int ats_alloc_one(struct pci_dev *dev, int ps) |
710 | { | 726 | { |
711 | int pos; | 727 | int pos; |
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c deleted file mode 100644 index 871f65c15936..000000000000 --- a/drivers/pci/legacy.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/pci.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/interrupt.h> | ||
5 | #include "pci.h" | ||
6 | |||
7 | /** | ||
8 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
9 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
10 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
11 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
12 | * | ||
13 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
14 | * with a matching @vendor and @device, a pointer to its device structure is | ||
15 | * returned. Otherwise, %NULL is returned. | ||
16 | * A new search is initiated by passing %NULL as the @from argument. | ||
17 | * Otherwise if @from is not %NULL, searches continue from next device | ||
18 | * on the global list. | ||
19 | * | ||
20 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
21 | * the PCI device returned by this function can disappear at any moment in | ||
22 | * time. | ||
23 | */ | ||
24 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
25 | struct pci_dev *from) | ||
26 | { | ||
27 | struct pci_dev *pdev; | ||
28 | |||
29 | pci_dev_get(from); | ||
30 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
31 | pci_dev_put(pdev); | ||
32 | return pdev; | ||
33 | } | ||
34 | EXPORT_SYMBOL(pci_find_device); | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index f9cf3173b23d..44b0aeee83e5 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/slab.h> | ||
21 | 22 | ||
22 | #include "pci.h" | 23 | #include "pci.h" |
23 | #include "msi.h" | 24 | #include "msi.h" |
@@ -34,7 +35,12 @@ int arch_msi_check_device(struct pci_dev *dev, int nvec, int type) | |||
34 | #endif | 35 | #endif |
35 | 36 | ||
36 | #ifndef arch_setup_msi_irqs | 37 | #ifndef arch_setup_msi_irqs |
37 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 38 | # define arch_setup_msi_irqs default_setup_msi_irqs |
39 | # define HAVE_DEFAULT_MSI_SETUP_IRQS | ||
40 | #endif | ||
41 | |||
42 | #ifdef HAVE_DEFAULT_MSI_SETUP_IRQS | ||
43 | int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
38 | { | 44 | { |
39 | struct msi_desc *entry; | 45 | struct msi_desc *entry; |
40 | int ret; | 46 | int ret; |
@@ -59,7 +65,12 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
59 | #endif | 65 | #endif |
60 | 66 | ||
61 | #ifndef arch_teardown_msi_irqs | 67 | #ifndef arch_teardown_msi_irqs |
62 | void arch_teardown_msi_irqs(struct pci_dev *dev) | 68 | # define arch_teardown_msi_irqs default_teardown_msi_irqs |
69 | # define HAVE_DEFAULT_MSI_TEARDOWN_IRQS | ||
70 | #endif | ||
71 | |||
72 | #ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS | ||
73 | void default_teardown_msi_irqs(struct pci_dev *dev) | ||
63 | { | 74 | { |
64 | struct msi_desc *entry; | 75 | struct msi_desc *entry; |
65 | 76 | ||
@@ -157,8 +168,9 @@ static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag) | |||
157 | u32 mask_bits = desc->masked; | 168 | u32 mask_bits = desc->masked; |
158 | unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | 169 | unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + |
159 | PCI_MSIX_ENTRY_VECTOR_CTRL; | 170 | PCI_MSIX_ENTRY_VECTOR_CTRL; |
160 | mask_bits &= ~1; | 171 | mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; |
161 | mask_bits |= flag; | 172 | if (flag) |
173 | mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; | ||
162 | writel(mask_bits, desc->mask_base + offset); | 174 | writel(mask_bits, desc->mask_base + offset); |
163 | 175 | ||
164 | return mask_bits; | 176 | return mask_bits; |
@@ -169,32 +181,33 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag) | |||
169 | desc->masked = __msix_mask_irq(desc, flag); | 181 | desc->masked = __msix_mask_irq(desc, flag); |
170 | } | 182 | } |
171 | 183 | ||
172 | static void msi_set_mask_bit(unsigned irq, u32 flag) | 184 | static void msi_set_mask_bit(struct irq_data *data, u32 flag) |
173 | { | 185 | { |
174 | struct msi_desc *desc = get_irq_msi(irq); | 186 | struct msi_desc *desc = irq_data_get_msi(data); |
175 | 187 | ||
176 | if (desc->msi_attrib.is_msix) { | 188 | if (desc->msi_attrib.is_msix) { |
177 | msix_mask_irq(desc, flag); | 189 | msix_mask_irq(desc, flag); |
178 | readl(desc->mask_base); /* Flush write to device */ | 190 | readl(desc->mask_base); /* Flush write to device */ |
179 | } else { | 191 | } else { |
180 | unsigned offset = irq - desc->dev->irq; | 192 | unsigned offset = data->irq - desc->dev->irq; |
181 | msi_mask_irq(desc, 1 << offset, flag << offset); | 193 | msi_mask_irq(desc, 1 << offset, flag << offset); |
182 | } | 194 | } |
183 | } | 195 | } |
184 | 196 | ||
185 | void mask_msi_irq(unsigned int irq) | 197 | void mask_msi_irq(struct irq_data *data) |
186 | { | 198 | { |
187 | msi_set_mask_bit(irq, 1); | 199 | msi_set_mask_bit(data, 1); |
188 | } | 200 | } |
189 | 201 | ||
190 | void unmask_msi_irq(unsigned int irq) | 202 | void unmask_msi_irq(struct irq_data *data) |
191 | { | 203 | { |
192 | msi_set_mask_bit(irq, 0); | 204 | msi_set_mask_bit(data, 0); |
193 | } | 205 | } |
194 | 206 | ||
195 | void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 207 | void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
196 | { | 208 | { |
197 | struct msi_desc *entry = get_irq_desc_msi(desc); | 209 | BUG_ON(entry->dev->current_state != PCI_D0); |
210 | |||
198 | if (entry->msi_attrib.is_msix) { | 211 | if (entry->msi_attrib.is_msix) { |
199 | void __iomem *base = entry->mask_base + | 212 | void __iomem *base = entry->mask_base + |
200 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; | 213 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; |
@@ -223,15 +236,33 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
223 | 236 | ||
224 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
225 | { | 238 | { |
226 | struct irq_desc *desc = irq_to_desc(irq); | 239 | struct msi_desc *entry = get_irq_msi(irq); |
227 | 240 | ||
228 | read_msi_msg_desc(desc, msg); | 241 | __read_msi_msg(entry, msg); |
229 | } | 242 | } |
230 | 243 | ||
231 | void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 244 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
232 | { | 245 | { |
233 | struct msi_desc *entry = get_irq_desc_msi(desc); | 246 | /* Assert that the cache is valid, assuming that |
234 | if (entry->msi_attrib.is_msix) { | 247 | * valid messages are not all-zeroes. */ |
248 | BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | | ||
249 | entry->msg.data)); | ||
250 | |||
251 | *msg = entry->msg; | ||
252 | } | ||
253 | |||
254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | ||
255 | { | ||
256 | struct msi_desc *entry = get_irq_msi(irq); | ||
257 | |||
258 | __get_cached_msi_msg(entry, msg); | ||
259 | } | ||
260 | |||
261 | void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | ||
262 | { | ||
263 | if (entry->dev->current_state != PCI_D0) { | ||
264 | /* Don't touch the hardware now */ | ||
265 | } else if (entry->msi_attrib.is_msix) { | ||
235 | void __iomem *base; | 266 | void __iomem *base; |
236 | base = entry->mask_base + | 267 | base = entry->mask_base + |
237 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; | 268 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; |
@@ -266,9 +297,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
266 | 297 | ||
267 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
268 | { | 299 | { |
269 | struct irq_desc *desc = irq_to_desc(irq); | 300 | struct msi_desc *entry = get_irq_msi(irq); |
270 | 301 | ||
271 | write_msi_msg_desc(desc, msg); | 302 | __write_msi_msg(entry, msg); |
272 | } | 303 | } |
273 | 304 | ||
274 | static void free_msi_irqs(struct pci_dev *dev) | 305 | static void free_msi_irqs(struct pci_dev *dev) |
@@ -434,7 +465,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
434 | static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos, | 465 | static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos, |
435 | unsigned nr_entries) | 466 | unsigned nr_entries) |
436 | { | 467 | { |
437 | unsigned long phys_addr; | 468 | resource_size_t phys_addr; |
438 | u32 table_offset; | 469 | u32 table_offset; |
439 | u8 bir; | 470 | u8 bir; |
440 | 471 | ||
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h index de27c1cb5a2b..65c42f80f23e 100644 --- a/drivers/pci/msi.h +++ b/drivers/pci/msi.h | |||
@@ -6,12 +6,6 @@ | |||
6 | #ifndef MSI_H | 6 | #ifndef MSI_H |
7 | #define MSI_H | 7 | #define MSI_H |
8 | 8 | ||
9 | #define PCI_MSIX_ENTRY_SIZE 16 | ||
10 | #define PCI_MSIX_ENTRY_LOWER_ADDR 0 | ||
11 | #define PCI_MSIX_ENTRY_UPPER_ADDR 4 | ||
12 | #define PCI_MSIX_ENTRY_DATA 8 | ||
13 | #define PCI_MSIX_ENTRY_VECTOR_CTRL 12 | ||
14 | |||
15 | #define msi_control_reg(base) (base + PCI_MSI_FLAGS) | 9 | #define msi_control_reg(base) (base + PCI_MSI_FLAGS) |
16 | #define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) | 10 | #define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) |
17 | #define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) | 11 | #define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) |
@@ -22,8 +16,8 @@ | |||
22 | #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) | 16 | #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) |
23 | #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) | 17 | #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) |
24 | 18 | ||
25 | #define msix_table_offset_reg(base) (base + 0x04) | 19 | #define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE) |
26 | #define msix_pba_offset_reg(base) (base + 0x08) | 20 | #define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA) |
27 | #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) | 21 | #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) |
28 | #define multi_msix_capable(control) msix_table_size((control)) | 22 | #define multi_msix_capable(control) msix_table_size((control)) |
29 | 23 | ||
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7e2829538a4c..6fe0772e0e7d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -16,8 +16,145 @@ | |||
16 | #include <acpi/acpi_bus.h> | 16 | #include <acpi/acpi_bus.h> |
17 | 17 | ||
18 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
19 | #include <linux/pm_runtime.h> | ||
19 | #include "pci.h" | 20 | #include "pci.h" |
20 | 21 | ||
22 | static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); | ||
23 | |||
24 | /** | ||
25 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. | ||
26 | * @handle: ACPI handle of a device the notification is for. | ||
27 | * @event: Type of the signaled event. | ||
28 | * @context: PCI root bus to wake up devices on. | ||
29 | */ | ||
30 | static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) | ||
31 | { | ||
32 | struct pci_bus *pci_bus = context; | ||
33 | |||
34 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) | ||
35 | pci_pme_wakeup_bus(pci_bus); | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. | ||
40 | * @handle: ACPI handle of a device the notification is for. | ||
41 | * @event: Type of the signaled event. | ||
42 | * @context: PCI device object to wake up. | ||
43 | */ | ||
44 | static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | ||
45 | { | ||
46 | struct pci_dev *pci_dev = context; | ||
47 | |||
48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { | ||
49 | pci_wakeup_event(pci_dev); | ||
50 | pci_check_pme_status(pci_dev); | ||
51 | pm_runtime_resume(&pci_dev->dev); | ||
52 | if (pci_dev->subordinate) | ||
53 | pci_pme_wakeup_bus(pci_dev->subordinate); | ||
54 | } | ||
55 | } | ||
56 | |||
57 | /** | ||
58 | * add_pm_notifier - Register PM notifier for given ACPI device. | ||
59 | * @dev: ACPI device to add the notifier for. | ||
60 | * @context: PCI device or bus to check for PME status if an event is signaled. | ||
61 | * | ||
62 | * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of | ||
63 | * PM wake-up events. For example, wake-up events may be generated for bridges | ||
64 | * if one of the devices below the bridge is signaling PME, even if the bridge | ||
65 | * itself doesn't have a wake-up GPE associated with it. | ||
66 | */ | ||
67 | static acpi_status add_pm_notifier(struct acpi_device *dev, | ||
68 | acpi_notify_handler handler, | ||
69 | void *context) | ||
70 | { | ||
71 | acpi_status status = AE_ALREADY_EXISTS; | ||
72 | |||
73 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
74 | |||
75 | if (dev->wakeup.flags.notifier_present) | ||
76 | goto out; | ||
77 | |||
78 | status = acpi_install_notify_handler(dev->handle, | ||
79 | ACPI_SYSTEM_NOTIFY, | ||
80 | handler, context); | ||
81 | if (ACPI_FAILURE(status)) | ||
82 | goto out; | ||
83 | |||
84 | dev->wakeup.flags.notifier_present = true; | ||
85 | |||
86 | out: | ||
87 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
88 | return status; | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
93 | * @dev: ACPI device to remove the notifier from. | ||
94 | */ | ||
95 | static acpi_status remove_pm_notifier(struct acpi_device *dev, | ||
96 | acpi_notify_handler handler) | ||
97 | { | ||
98 | acpi_status status = AE_BAD_PARAMETER; | ||
99 | |||
100 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
101 | |||
102 | if (!dev->wakeup.flags.notifier_present) | ||
103 | goto out; | ||
104 | |||
105 | status = acpi_remove_notify_handler(dev->handle, | ||
106 | ACPI_SYSTEM_NOTIFY, | ||
107 | handler); | ||
108 | if (ACPI_FAILURE(status)) | ||
109 | goto out; | ||
110 | |||
111 | dev->wakeup.flags.notifier_present = false; | ||
112 | |||
113 | out: | ||
114 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
115 | return status; | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. | ||
120 | * @dev: ACPI device to add the notifier for. | ||
121 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. | ||
122 | */ | ||
123 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | ||
124 | struct pci_bus *pci_bus) | ||
125 | { | ||
126 | return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. | ||
131 | * @dev: ACPI device to remove the notifier from. | ||
132 | */ | ||
133 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | ||
134 | { | ||
135 | return remove_pm_notifier(dev, pci_acpi_wake_bus); | ||
136 | } | ||
137 | |||
138 | /** | ||
139 | * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. | ||
140 | * @dev: ACPI device to add the notifier for. | ||
141 | * @pci_dev: PCI device to check for the PME status if an event is signaled. | ||
142 | */ | ||
143 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | ||
144 | struct pci_dev *pci_dev) | ||
145 | { | ||
146 | return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. | ||
151 | * @dev: ACPI device to remove the notifier from. | ||
152 | */ | ||
153 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | ||
154 | { | ||
155 | return remove_pm_notifier(dev, pci_acpi_wake_dev); | ||
156 | } | ||
157 | |||
21 | /* | 158 | /* |
22 | * _SxD returns the D-state with the highest power | 159 | * _SxD returns the D-state with the highest power |
23 | * (lowest D-state number) supported in the S-state "x". | 160 | * (lowest D-state number) supported in the S-state "x". |
@@ -131,19 +268,92 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
131 | return 0; | 268 | return 0; |
132 | } | 269 | } |
133 | 270 | ||
271 | /** | ||
272 | * acpi_dev_run_wake - Enable/disable wake-up for given device. | ||
273 | * @phys_dev: Device to enable/disable the platform to wake-up the system for. | ||
274 | * @enable: Whether enable or disable the wake-up functionality. | ||
275 | * | ||
276 | * Find the ACPI device object corresponding to @pci_dev and try to | ||
277 | * enable/disable the GPE associated with it. | ||
278 | */ | ||
279 | static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | ||
280 | { | ||
281 | struct acpi_device *dev; | ||
282 | acpi_handle handle; | ||
283 | int error = -ENODEV; | ||
284 | |||
285 | if (!device_run_wake(phys_dev)) | ||
286 | return -EINVAL; | ||
287 | |||
288 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
289 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { | ||
290 | dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", | ||
291 | __func__); | ||
292 | return -ENODEV; | ||
293 | } | ||
294 | |||
295 | if (enable) { | ||
296 | if (!dev->wakeup.run_wake_count++) { | ||
297 | acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); | ||
298 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
299 | dev->wakeup.gpe_number); | ||
300 | } | ||
301 | } else if (dev->wakeup.run_wake_count > 0) { | ||
302 | if (!--dev->wakeup.run_wake_count) { | ||
303 | acpi_disable_gpe(dev->wakeup.gpe_device, | ||
304 | dev->wakeup.gpe_number); | ||
305 | acpi_disable_wakeup_device_power(dev); | ||
306 | } | ||
307 | } else { | ||
308 | error = -EALREADY; | ||
309 | } | ||
310 | |||
311 | return error; | ||
312 | } | ||
313 | |||
314 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | ||
315 | { | ||
316 | while (bus->parent) { | ||
317 | struct pci_dev *bridge = bus->self; | ||
318 | |||
319 | if (bridge->pme_interrupt) | ||
320 | return; | ||
321 | if (!acpi_dev_run_wake(&bridge->dev, enable)) | ||
322 | return; | ||
323 | bus = bus->parent; | ||
324 | } | ||
325 | |||
326 | /* We have reached the root bus. */ | ||
327 | if (bus->bridge) | ||
328 | acpi_dev_run_wake(bus->bridge, enable); | ||
329 | } | ||
330 | |||
331 | static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) | ||
332 | { | ||
333 | if (dev->pme_interrupt) | ||
334 | return 0; | ||
335 | |||
336 | if (!acpi_dev_run_wake(&dev->dev, enable)) | ||
337 | return 0; | ||
338 | |||
339 | acpi_pci_propagate_run_wake(dev->bus, enable); | ||
340 | return 0; | ||
341 | } | ||
342 | |||
134 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 343 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
135 | .is_manageable = acpi_pci_power_manageable, | 344 | .is_manageable = acpi_pci_power_manageable, |
136 | .set_state = acpi_pci_set_power_state, | 345 | .set_state = acpi_pci_set_power_state, |
137 | .choose_state = acpi_pci_choose_state, | 346 | .choose_state = acpi_pci_choose_state, |
138 | .can_wakeup = acpi_pci_can_wakeup, | 347 | .can_wakeup = acpi_pci_can_wakeup, |
139 | .sleep_wake = acpi_pci_sleep_wake, | 348 | .sleep_wake = acpi_pci_sleep_wake, |
349 | .run_wake = acpi_pci_run_wake, | ||
140 | }; | 350 | }; |
141 | 351 | ||
142 | /* ACPI bus type */ | 352 | /* ACPI bus type */ |
143 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) | 353 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) |
144 | { | 354 | { |
145 | struct pci_dev * pci_dev; | 355 | struct pci_dev * pci_dev; |
146 | acpi_integer addr; | 356 | u64 addr; |
147 | 357 | ||
148 | pci_dev = to_pci_dev(dev); | 358 | pci_dev = to_pci_dev(dev); |
149 | /* Please ref to ACPI spec for the syntax of _ADR */ | 359 | /* Please ref to ACPI spec for the syntax of _ADR */ |
@@ -189,6 +399,7 @@ static int __init acpi_pci_init(void) | |||
189 | 399 | ||
190 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | 400 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { |
191 | printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); | 401 | printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); |
402 | pcie_clear_aspm(); | ||
192 | pcie_no_aspm(); | 403 | pcie_no_aspm(); |
193 | } | 404 | } |
194 | 405 | ||
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index e5d47be3c6d7..88246dd46452 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/pm_runtime.h> | ||
20 | #include "pci.h" | 21 | #include "pci.h" |
21 | 22 | ||
22 | struct pci_dynid { | 23 | struct pci_dynid { |
@@ -288,8 +289,26 @@ struct drv_dev_and_id { | |||
288 | static long local_pci_probe(void *_ddi) | 289 | static long local_pci_probe(void *_ddi) |
289 | { | 290 | { |
290 | struct drv_dev_and_id *ddi = _ddi; | 291 | struct drv_dev_and_id *ddi = _ddi; |
291 | 292 | struct device *dev = &ddi->dev->dev; | |
292 | return ddi->drv->probe(ddi->dev, ddi->id); | 293 | int rc; |
294 | |||
295 | /* Unbound PCI devices are always set to disabled and suspended. | ||
296 | * During probe, the device is set to enabled and active and the | ||
297 | * usage count is incremented. If the driver supports runtime PM, | ||
298 | * it should call pm_runtime_put_noidle() in its probe routine and | ||
299 | * pm_runtime_get_noresume() in its remove routine. | ||
300 | */ | ||
301 | pm_runtime_get_noresume(dev); | ||
302 | pm_runtime_set_active(dev); | ||
303 | pm_runtime_enable(dev); | ||
304 | |||
305 | rc = ddi->drv->probe(ddi->dev, ddi->id); | ||
306 | if (rc) { | ||
307 | pm_runtime_disable(dev); | ||
308 | pm_runtime_set_suspended(dev); | ||
309 | pm_runtime_put_noidle(dev); | ||
310 | } | ||
311 | return rc; | ||
293 | } | 312 | } |
294 | 313 | ||
295 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | 314 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, |
@@ -319,7 +338,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | |||
319 | } | 338 | } |
320 | 339 | ||
321 | /** | 340 | /** |
322 | * __pci_device_probe() | 341 | * __pci_device_probe - check if a driver wants to claim a specific PCI device |
323 | * @drv: driver to call to check if it wants the PCI device | 342 | * @drv: driver to call to check if it wants the PCI device |
324 | * @pci_dev: PCI device being probed | 343 | * @pci_dev: PCI device being probed |
325 | * | 344 | * |
@@ -368,11 +387,19 @@ static int pci_device_remove(struct device * dev) | |||
368 | struct pci_driver * drv = pci_dev->driver; | 387 | struct pci_driver * drv = pci_dev->driver; |
369 | 388 | ||
370 | if (drv) { | 389 | if (drv) { |
371 | if (drv->remove) | 390 | if (drv->remove) { |
391 | pm_runtime_get_sync(dev); | ||
372 | drv->remove(pci_dev); | 392 | drv->remove(pci_dev); |
393 | pm_runtime_put_noidle(dev); | ||
394 | } | ||
373 | pci_dev->driver = NULL; | 395 | pci_dev->driver = NULL; |
374 | } | 396 | } |
375 | 397 | ||
398 | /* Undo the runtime PM settings in local_pci_probe() */ | ||
399 | pm_runtime_disable(dev); | ||
400 | pm_runtime_set_suspended(dev); | ||
401 | pm_runtime_put_noidle(dev); | ||
402 | |||
376 | /* | 403 | /* |
377 | * If the device is still on, set the power state as "unknown", | 404 | * If the device is still on, set the power state as "unknown", |
378 | * since it might change by the next time we load the driver. | 405 | * since it might change by the next time we load the driver. |
@@ -404,6 +431,36 @@ static void pci_device_shutdown(struct device *dev) | |||
404 | pci_msix_shutdown(pci_dev); | 431 | pci_msix_shutdown(pci_dev); |
405 | } | 432 | } |
406 | 433 | ||
434 | #ifdef CONFIG_PM_OPS | ||
435 | |||
436 | /* Auxiliary functions used for system resume and run-time resume. */ | ||
437 | |||
438 | /** | ||
439 | * pci_restore_standard_config - restore standard config registers of PCI device | ||
440 | * @pci_dev: PCI device to handle | ||
441 | */ | ||
442 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
443 | { | ||
444 | pci_update_current_state(pci_dev, PCI_UNKNOWN); | ||
445 | |||
446 | if (pci_dev->current_state != PCI_D0) { | ||
447 | int error = pci_set_power_state(pci_dev, PCI_D0); | ||
448 | if (error) | ||
449 | return error; | ||
450 | } | ||
451 | |||
452 | pci_restore_state(pci_dev); | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static void pci_pm_default_resume_early(struct pci_dev *pci_dev) | ||
457 | { | ||
458 | pci_restore_standard_config(pci_dev); | ||
459 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
460 | } | ||
461 | |||
462 | #endif | ||
463 | |||
407 | #ifdef CONFIG_PM_SLEEP | 464 | #ifdef CONFIG_PM_SLEEP |
408 | 465 | ||
409 | /* | 466 | /* |
@@ -520,29 +577,6 @@ static int pci_legacy_resume(struct device *dev) | |||
520 | 577 | ||
521 | /* Auxiliary functions used by the new power management framework */ | 578 | /* Auxiliary functions used by the new power management framework */ |
522 | 579 | ||
523 | /** | ||
524 | * pci_restore_standard_config - restore standard config registers of PCI device | ||
525 | * @pci_dev: PCI device to handle | ||
526 | */ | ||
527 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
528 | { | ||
529 | pci_update_current_state(pci_dev, PCI_UNKNOWN); | ||
530 | |||
531 | if (pci_dev->current_state != PCI_D0) { | ||
532 | int error = pci_set_power_state(pci_dev, PCI_D0); | ||
533 | if (error) | ||
534 | return error; | ||
535 | } | ||
536 | |||
537 | return pci_restore_state(pci_dev); | ||
538 | } | ||
539 | |||
540 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | ||
541 | { | ||
542 | pci_restore_standard_config(pci_dev); | ||
543 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
544 | } | ||
545 | |||
546 | static void pci_pm_default_resume(struct pci_dev *pci_dev) | 580 | static void pci_pm_default_resume(struct pci_dev *pci_dev) |
547 | { | 581 | { |
548 | pci_fixup_device(pci_fixup_resume, pci_dev); | 582 | pci_fixup_device(pci_fixup_resume, pci_dev); |
@@ -581,6 +615,17 @@ static int pci_pm_prepare(struct device *dev) | |||
581 | struct device_driver *drv = dev->driver; | 615 | struct device_driver *drv = dev->driver; |
582 | int error = 0; | 616 | int error = 0; |
583 | 617 | ||
618 | /* | ||
619 | * PCI devices suspended at run time need to be resumed at this | ||
620 | * point, because in general it is necessary to reconfigure them for | ||
621 | * system suspend. Namely, if the device is supposed to wake up the | ||
622 | * system from the sleep state, we may need to reconfigure it for this | ||
623 | * purpose. In turn, if the device is not supposed to wake up the | ||
624 | * system from the sleep state, we'll have to prevent it from signaling | ||
625 | * wake-up. | ||
626 | */ | ||
627 | pm_runtime_resume(dev); | ||
628 | |||
584 | if (drv && drv->pm && drv->pm->prepare) | 629 | if (drv && drv->pm && drv->pm->prepare) |
585 | error = drv->pm->prepare(dev); | 630 | error = drv->pm->prepare(dev); |
586 | 631 | ||
@@ -595,6 +640,13 @@ static void pci_pm_complete(struct device *dev) | |||
595 | drv->pm->complete(dev); | 640 | drv->pm->complete(dev); |
596 | } | 641 | } |
597 | 642 | ||
643 | #else /* !CONFIG_PM_SLEEP */ | ||
644 | |||
645 | #define pci_pm_prepare NULL | ||
646 | #define pci_pm_complete NULL | ||
647 | |||
648 | #endif /* !CONFIG_PM_SLEEP */ | ||
649 | |||
598 | #ifdef CONFIG_SUSPEND | 650 | #ifdef CONFIG_SUSPEND |
599 | 651 | ||
600 | static int pci_pm_suspend(struct device *dev) | 652 | static int pci_pm_suspend(struct device *dev) |
@@ -681,7 +733,7 @@ static int pci_pm_resume_noirq(struct device *dev) | |||
681 | struct device_driver *drv = dev->driver; | 733 | struct device_driver *drv = dev->driver; |
682 | int error = 0; | 734 | int error = 0; |
683 | 735 | ||
684 | pci_pm_default_resume_noirq(pci_dev); | 736 | pci_pm_default_resume_early(pci_dev); |
685 | 737 | ||
686 | if (pci_has_legacy_pm_support(pci_dev)) | 738 | if (pci_has_legacy_pm_support(pci_dev)) |
687 | return pci_legacy_resume_early(dev); | 739 | return pci_legacy_resume_early(dev); |
@@ -879,7 +931,7 @@ static int pci_pm_restore_noirq(struct device *dev) | |||
879 | struct device_driver *drv = dev->driver; | 931 | struct device_driver *drv = dev->driver; |
880 | int error = 0; | 932 | int error = 0; |
881 | 933 | ||
882 | pci_pm_default_resume_noirq(pci_dev); | 934 | pci_pm_default_resume_early(pci_dev); |
883 | 935 | ||
884 | if (pci_has_legacy_pm_support(pci_dev)) | 936 | if (pci_has_legacy_pm_support(pci_dev)) |
885 | return pci_legacy_resume_early(dev); | 937 | return pci_legacy_resume_early(dev); |
@@ -931,6 +983,84 @@ static int pci_pm_restore(struct device *dev) | |||
931 | 983 | ||
932 | #endif /* !CONFIG_HIBERNATION */ | 984 | #endif /* !CONFIG_HIBERNATION */ |
933 | 985 | ||
986 | #ifdef CONFIG_PM_RUNTIME | ||
987 | |||
988 | static int pci_pm_runtime_suspend(struct device *dev) | ||
989 | { | ||
990 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
991 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
992 | pci_power_t prev = pci_dev->current_state; | ||
993 | int error; | ||
994 | |||
995 | if (!pm || !pm->runtime_suspend) | ||
996 | return -ENOSYS; | ||
997 | |||
998 | error = pm->runtime_suspend(dev); | ||
999 | suspend_report_result(pm->runtime_suspend, error); | ||
1000 | if (error) | ||
1001 | return error; | ||
1002 | |||
1003 | pci_fixup_device(pci_fixup_suspend, pci_dev); | ||
1004 | |||
1005 | if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 | ||
1006 | && pci_dev->current_state != PCI_UNKNOWN) { | ||
1007 | WARN_ONCE(pci_dev->current_state != prev, | ||
1008 | "PCI PM: State of device not saved by %pF\n", | ||
1009 | pm->runtime_suspend); | ||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
1013 | if (!pci_dev->state_saved) | ||
1014 | pci_save_state(pci_dev); | ||
1015 | |||
1016 | pci_finish_runtime_suspend(pci_dev); | ||
1017 | |||
1018 | return 0; | ||
1019 | } | ||
1020 | |||
1021 | static int pci_pm_runtime_resume(struct device *dev) | ||
1022 | { | ||
1023 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1024 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
1025 | |||
1026 | if (!pm || !pm->runtime_resume) | ||
1027 | return -ENOSYS; | ||
1028 | |||
1029 | pci_pm_default_resume_early(pci_dev); | ||
1030 | __pci_enable_wake(pci_dev, PCI_D0, true, false); | ||
1031 | pci_fixup_device(pci_fixup_resume, pci_dev); | ||
1032 | |||
1033 | return pm->runtime_resume(dev); | ||
1034 | } | ||
1035 | |||
1036 | static int pci_pm_runtime_idle(struct device *dev) | ||
1037 | { | ||
1038 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
1039 | |||
1040 | if (!pm) | ||
1041 | return -ENOSYS; | ||
1042 | |||
1043 | if (pm->runtime_idle) { | ||
1044 | int ret = pm->runtime_idle(dev); | ||
1045 | if (ret) | ||
1046 | return ret; | ||
1047 | } | ||
1048 | |||
1049 | pm_runtime_suspend(dev); | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
1054 | #else /* !CONFIG_PM_RUNTIME */ | ||
1055 | |||
1056 | #define pci_pm_runtime_suspend NULL | ||
1057 | #define pci_pm_runtime_resume NULL | ||
1058 | #define pci_pm_runtime_idle NULL | ||
1059 | |||
1060 | #endif /* !CONFIG_PM_RUNTIME */ | ||
1061 | |||
1062 | #ifdef CONFIG_PM_OPS | ||
1063 | |||
934 | const struct dev_pm_ops pci_dev_pm_ops = { | 1064 | const struct dev_pm_ops pci_dev_pm_ops = { |
935 | .prepare = pci_pm_prepare, | 1065 | .prepare = pci_pm_prepare, |
936 | .complete = pci_pm_complete, | 1066 | .complete = pci_pm_complete, |
@@ -946,15 +1076,18 @@ const struct dev_pm_ops pci_dev_pm_ops = { | |||
946 | .thaw_noirq = pci_pm_thaw_noirq, | 1076 | .thaw_noirq = pci_pm_thaw_noirq, |
947 | .poweroff_noirq = pci_pm_poweroff_noirq, | 1077 | .poweroff_noirq = pci_pm_poweroff_noirq, |
948 | .restore_noirq = pci_pm_restore_noirq, | 1078 | .restore_noirq = pci_pm_restore_noirq, |
1079 | .runtime_suspend = pci_pm_runtime_suspend, | ||
1080 | .runtime_resume = pci_pm_runtime_resume, | ||
1081 | .runtime_idle = pci_pm_runtime_idle, | ||
949 | }; | 1082 | }; |
950 | 1083 | ||
951 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) | 1084 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) |
952 | 1085 | ||
953 | #else /* !CONFIG_PM_SLEEP */ | 1086 | #else /* !COMFIG_PM_OPS */ |
954 | 1087 | ||
955 | #define PCI_PM_OPS_PTR NULL | 1088 | #define PCI_PM_OPS_PTR NULL |
956 | 1089 | ||
957 | #endif /* !CONFIG_PM_SLEEP */ | 1090 | #endif /* !COMFIG_PM_OPS */ |
958 | 1091 | ||
959 | /** | 1092 | /** |
960 | * __pci_register_driver - register a new pci driver | 1093 | * __pci_register_driver - register a new pci driver |
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c new file mode 100644 index 000000000000..90c0a729cd3a --- /dev/null +++ b/drivers/pci/pci-label.c | |||
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * Purpose: Export the firmware instance and label associated with | ||
3 | * a pci device to sysfs | ||
4 | * Copyright (C) 2010 Dell Inc. | ||
5 | * by Narendra K <Narendra_K@dell.com>, | ||
6 | * Jordan Hargrave <Jordan_Hargrave@dell.com> | ||
7 | * | ||
8 | * SMBIOS defines type 41 for onboard pci devices. This code retrieves | ||
9 | * the instance number and string from the type 41 record and exports | ||
10 | * it to sysfs. | ||
11 | * | ||
12 | * Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more | ||
13 | * information. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmi.h> | ||
17 | #include <linux/sysfs.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/pci_ids.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include "pci.h" | ||
23 | |||
24 | enum smbios_attr_enum { | ||
25 | SMBIOS_ATTR_NONE = 0, | ||
26 | SMBIOS_ATTR_LABEL_SHOW, | ||
27 | SMBIOS_ATTR_INSTANCE_SHOW, | ||
28 | }; | ||
29 | |||
30 | static mode_t | ||
31 | find_smbios_instance_string(struct pci_dev *pdev, char *buf, | ||
32 | enum smbios_attr_enum attribute) | ||
33 | { | ||
34 | const struct dmi_device *dmi; | ||
35 | struct dmi_dev_onboard *donboard; | ||
36 | int bus; | ||
37 | int devfn; | ||
38 | |||
39 | bus = pdev->bus->number; | ||
40 | devfn = pdev->devfn; | ||
41 | |||
42 | dmi = NULL; | ||
43 | while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, | ||
44 | NULL, dmi)) != NULL) { | ||
45 | donboard = dmi->device_data; | ||
46 | if (donboard && donboard->bus == bus && | ||
47 | donboard->devfn == devfn) { | ||
48 | if (buf) { | ||
49 | if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) | ||
50 | return scnprintf(buf, PAGE_SIZE, | ||
51 | "%d\n", | ||
52 | donboard->instance); | ||
53 | else if (attribute == SMBIOS_ATTR_LABEL_SHOW) | ||
54 | return scnprintf(buf, PAGE_SIZE, | ||
55 | "%s\n", | ||
56 | dmi->name); | ||
57 | } | ||
58 | return strlen(dmi->name); | ||
59 | } | ||
60 | } | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static mode_t | ||
65 | smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr, | ||
66 | int n) | ||
67 | { | ||
68 | struct device *dev; | ||
69 | struct pci_dev *pdev; | ||
70 | |||
71 | dev = container_of(kobj, struct device, kobj); | ||
72 | pdev = to_pci_dev(dev); | ||
73 | |||
74 | return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ? | ||
75 | S_IRUGO : 0; | ||
76 | } | ||
77 | |||
78 | static ssize_t | ||
79 | smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
80 | { | ||
81 | struct pci_dev *pdev; | ||
82 | pdev = to_pci_dev(dev); | ||
83 | |||
84 | return find_smbios_instance_string(pdev, buf, | ||
85 | SMBIOS_ATTR_LABEL_SHOW); | ||
86 | } | ||
87 | |||
88 | static ssize_t | ||
89 | smbiosinstance_show(struct device *dev, | ||
90 | struct device_attribute *attr, char *buf) | ||
91 | { | ||
92 | struct pci_dev *pdev; | ||
93 | pdev = to_pci_dev(dev); | ||
94 | |||
95 | return find_smbios_instance_string(pdev, buf, | ||
96 | SMBIOS_ATTR_INSTANCE_SHOW); | ||
97 | } | ||
98 | |||
99 | static struct device_attribute smbios_attr_label = { | ||
100 | .attr = {.name = "label", .mode = 0444}, | ||
101 | .show = smbioslabel_show, | ||
102 | }; | ||
103 | |||
104 | static struct device_attribute smbios_attr_instance = { | ||
105 | .attr = {.name = "index", .mode = 0444}, | ||
106 | .show = smbiosinstance_show, | ||
107 | }; | ||
108 | |||
109 | static struct attribute *smbios_attributes[] = { | ||
110 | &smbios_attr_label.attr, | ||
111 | &smbios_attr_instance.attr, | ||
112 | NULL, | ||
113 | }; | ||
114 | |||
115 | static struct attribute_group smbios_attr_group = { | ||
116 | .attrs = smbios_attributes, | ||
117 | .is_visible = smbios_instance_string_exist, | ||
118 | }; | ||
119 | |||
120 | static int | ||
121 | pci_create_smbiosname_file(struct pci_dev *pdev) | ||
122 | { | ||
123 | if (!sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group)) | ||
124 | return 0; | ||
125 | return -ENODEV; | ||
126 | } | ||
127 | |||
128 | static void | ||
129 | pci_remove_smbiosname_file(struct pci_dev *pdev) | ||
130 | { | ||
131 | sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group); | ||
132 | } | ||
133 | |||
134 | void pci_create_firmware_label_files(struct pci_dev *pdev) | ||
135 | { | ||
136 | if (!pci_create_smbiosname_file(pdev)) | ||
137 | ; | ||
138 | } | ||
139 | |||
140 | void pci_remove_firmware_label_files(struct pci_dev *pdev) | ||
141 | { | ||
142 | pci_remove_smbiosname_file(pdev); | ||
143 | } | ||
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index f7b68ca6cc98..775e933c2225 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c | |||
@@ -47,6 +47,10 @@ static int __init pci_stub_init(void) | |||
47 | if (rc) | 47 | if (rc) |
48 | return rc; | 48 | return rc; |
49 | 49 | ||
50 | /* no ids passed actually */ | ||
51 | if (ids[0] == '\0') | ||
52 | return 0; | ||
53 | |||
50 | /* add ids specified in the module parameter */ | 54 | /* add ids specified in the module parameter */ |
51 | p = ids; | 55 | p = ids; |
52 | while ((id = strsep(&p, ","))) { | 56 | while ((id = strsep(&p, ","))) { |
@@ -54,6 +58,9 @@ static int __init pci_stub_init(void) | |||
54 | subdevice = PCI_ANY_ID, class=0, class_mask=0; | 58 | subdevice = PCI_ANY_ID, class=0, class_mask=0; |
55 | int fields; | 59 | int fields; |
56 | 60 | ||
61 | if (!strlen(id)) | ||
62 | continue; | ||
63 | |||
57 | fields = sscanf(id, "%x:%x:%x:%x:%x:%x", | 64 | fields = sscanf(id, "%x:%x:%x:%x:%x:%x", |
58 | &vendor, &device, &subvendor, &subdevice, | 65 | &vendor, &device, &subvendor, &subdevice, |
59 | &class, &class_mask); | 66 | &class, &class_mask); |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 807224ec8351..8ecaac983923 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -21,8 +21,10 @@ | |||
21 | #include <linux/stat.h> | 21 | #include <linux/stat.h> |
22 | #include <linux/topology.h> | 22 | #include <linux/topology.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/fs.h> | ||
24 | #include <linux/capability.h> | 25 | #include <linux/capability.h> |
25 | #include <linux/pci-aspm.h> | 26 | #include <linux/pci-aspm.h> |
27 | #include <linux/slab.h> | ||
26 | #include "pci.h" | 28 | #include "pci.h" |
27 | 29 | ||
28 | static int sysfs_initialized; /* = 0 */ | 30 | static int sysfs_initialized; /* = 0 */ |
@@ -356,7 +358,8 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
356 | struct device_attribute vga_attr = __ATTR_RO(boot_vga); | 358 | struct device_attribute vga_attr = __ATTR_RO(boot_vga); |
357 | 359 | ||
358 | static ssize_t | 360 | static ssize_t |
359 | pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, | 361 | pci_read_config(struct file *filp, struct kobject *kobj, |
362 | struct bin_attribute *bin_attr, | ||
360 | char *buf, loff_t off, size_t count) | 363 | char *buf, loff_t off, size_t count) |
361 | { | 364 | { |
362 | struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj)); | 365 | struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj)); |
@@ -365,7 +368,7 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
365 | u8 *data = (u8*) buf; | 368 | u8 *data = (u8*) buf; |
366 | 369 | ||
367 | /* Several chips lock up trying to read undefined config space */ | 370 | /* Several chips lock up trying to read undefined config space */ |
368 | if (capable(CAP_SYS_ADMIN)) { | 371 | if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) { |
369 | size = dev->cfg_size; | 372 | size = dev->cfg_size; |
370 | } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { | 373 | } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { |
371 | size = 128; | 374 | size = 128; |
@@ -429,7 +432,8 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
429 | } | 432 | } |
430 | 433 | ||
431 | static ssize_t | 434 | static ssize_t |
432 | pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr, | 435 | pci_write_config(struct file* filp, struct kobject *kobj, |
436 | struct bin_attribute *bin_attr, | ||
433 | char *buf, loff_t off, size_t count) | 437 | char *buf, loff_t off, size_t count) |
434 | { | 438 | { |
435 | struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj)); | 439 | struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj)); |
@@ -486,7 +490,8 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
486 | } | 490 | } |
487 | 491 | ||
488 | static ssize_t | 492 | static ssize_t |
489 | read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, | 493 | read_vpd_attr(struct file *filp, struct kobject *kobj, |
494 | struct bin_attribute *bin_attr, | ||
490 | char *buf, loff_t off, size_t count) | 495 | char *buf, loff_t off, size_t count) |
491 | { | 496 | { |
492 | struct pci_dev *dev = | 497 | struct pci_dev *dev = |
@@ -501,7 +506,8 @@ read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
501 | } | 506 | } |
502 | 507 | ||
503 | static ssize_t | 508 | static ssize_t |
504 | write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, | 509 | write_vpd_attr(struct file *filp, struct kobject *kobj, |
510 | struct bin_attribute *bin_attr, | ||
505 | char *buf, loff_t off, size_t count) | 511 | char *buf, loff_t off, size_t count) |
506 | { | 512 | { |
507 | struct pci_dev *dev = | 513 | struct pci_dev *dev = |
@@ -518,6 +524,7 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
518 | #ifdef HAVE_PCI_LEGACY | 524 | #ifdef HAVE_PCI_LEGACY |
519 | /** | 525 | /** |
520 | * pci_read_legacy_io - read byte(s) from legacy I/O port space | 526 | * pci_read_legacy_io - read byte(s) from legacy I/O port space |
527 | * @filp: open sysfs file | ||
521 | * @kobj: kobject corresponding to file to read from | 528 | * @kobj: kobject corresponding to file to read from |
522 | * @bin_attr: struct bin_attribute for this file | 529 | * @bin_attr: struct bin_attribute for this file |
523 | * @buf: buffer to store results | 530 | * @buf: buffer to store results |
@@ -528,7 +535,8 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
528 | * callback routine (pci_legacy_read). | 535 | * callback routine (pci_legacy_read). |
529 | */ | 536 | */ |
530 | static ssize_t | 537 | static ssize_t |
531 | pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | 538 | pci_read_legacy_io(struct file *filp, struct kobject *kobj, |
539 | struct bin_attribute *bin_attr, | ||
532 | char *buf, loff_t off, size_t count) | 540 | char *buf, loff_t off, size_t count) |
533 | { | 541 | { |
534 | struct pci_bus *bus = to_pci_bus(container_of(kobj, | 542 | struct pci_bus *bus = to_pci_bus(container_of(kobj, |
@@ -544,6 +552,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
544 | 552 | ||
545 | /** | 553 | /** |
546 | * pci_write_legacy_io - write byte(s) to legacy I/O port space | 554 | * pci_write_legacy_io - write byte(s) to legacy I/O port space |
555 | * @filp: open sysfs file | ||
547 | * @kobj: kobject corresponding to file to read from | 556 | * @kobj: kobject corresponding to file to read from |
548 | * @bin_attr: struct bin_attribute for this file | 557 | * @bin_attr: struct bin_attribute for this file |
549 | * @buf: buffer containing value to be written | 558 | * @buf: buffer containing value to be written |
@@ -554,7 +563,8 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
554 | * callback routine (pci_legacy_write). | 563 | * callback routine (pci_legacy_write). |
555 | */ | 564 | */ |
556 | static ssize_t | 565 | static ssize_t |
557 | pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | 566 | pci_write_legacy_io(struct file *filp, struct kobject *kobj, |
567 | struct bin_attribute *bin_attr, | ||
558 | char *buf, loff_t off, size_t count) | 568 | char *buf, loff_t off, size_t count) |
559 | { | 569 | { |
560 | struct pci_bus *bus = to_pci_bus(container_of(kobj, | 570 | struct pci_bus *bus = to_pci_bus(container_of(kobj, |
@@ -569,6 +579,7 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
569 | 579 | ||
570 | /** | 580 | /** |
571 | * pci_mmap_legacy_mem - map legacy PCI memory into user memory space | 581 | * pci_mmap_legacy_mem - map legacy PCI memory into user memory space |
582 | * @filp: open sysfs file | ||
572 | * @kobj: kobject corresponding to device to be mapped | 583 | * @kobj: kobject corresponding to device to be mapped |
573 | * @attr: struct bin_attribute for this file | 584 | * @attr: struct bin_attribute for this file |
574 | * @vma: struct vm_area_struct passed to mmap | 585 | * @vma: struct vm_area_struct passed to mmap |
@@ -578,7 +589,8 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
578 | * memory space. | 589 | * memory space. |
579 | */ | 590 | */ |
580 | static int | 591 | static int |
581 | pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, | 592 | pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj, |
593 | struct bin_attribute *attr, | ||
582 | struct vm_area_struct *vma) | 594 | struct vm_area_struct *vma) |
583 | { | 595 | { |
584 | struct pci_bus *bus = to_pci_bus(container_of(kobj, | 596 | struct pci_bus *bus = to_pci_bus(container_of(kobj, |
@@ -590,6 +602,7 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, | |||
590 | 602 | ||
591 | /** | 603 | /** |
592 | * pci_mmap_legacy_io - map legacy PCI IO into user memory space | 604 | * pci_mmap_legacy_io - map legacy PCI IO into user memory space |
605 | * @filp: open sysfs file | ||
593 | * @kobj: kobject corresponding to device to be mapped | 606 | * @kobj: kobject corresponding to device to be mapped |
594 | * @attr: struct bin_attribute for this file | 607 | * @attr: struct bin_attribute for this file |
595 | * @vma: struct vm_area_struct passed to mmap | 608 | * @vma: struct vm_area_struct passed to mmap |
@@ -599,7 +612,8 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, | |||
599 | * memory space. Returns -ENOSYS if the operation isn't supported | 612 | * memory space. Returns -ENOSYS if the operation isn't supported |
600 | */ | 613 | */ |
601 | static int | 614 | static int |
602 | pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr, | 615 | pci_mmap_legacy_io(struct file *filp, struct kobject *kobj, |
616 | struct bin_attribute *attr, | ||
603 | struct vm_area_struct *vma) | 617 | struct vm_area_struct *vma) |
604 | { | 618 | { |
605 | struct pci_bus *bus = to_pci_bus(container_of(kobj, | 619 | struct pci_bus *bus = to_pci_bus(container_of(kobj, |
@@ -642,6 +656,7 @@ void pci_create_legacy_files(struct pci_bus *b) | |||
642 | if (!b->legacy_io) | 656 | if (!b->legacy_io) |
643 | goto kzalloc_err; | 657 | goto kzalloc_err; |
644 | 658 | ||
659 | sysfs_bin_attr_init(b->legacy_io); | ||
645 | b->legacy_io->attr.name = "legacy_io"; | 660 | b->legacy_io->attr.name = "legacy_io"; |
646 | b->legacy_io->size = 0xffff; | 661 | b->legacy_io->size = 0xffff; |
647 | b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; | 662 | b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; |
@@ -655,6 +670,7 @@ void pci_create_legacy_files(struct pci_bus *b) | |||
655 | 670 | ||
656 | /* Allocated above after the legacy_io struct */ | 671 | /* Allocated above after the legacy_io struct */ |
657 | b->legacy_mem = b->legacy_io + 1; | 672 | b->legacy_mem = b->legacy_io + 1; |
673 | sysfs_bin_attr_init(b->legacy_mem); | ||
658 | b->legacy_mem->attr.name = "legacy_mem"; | 674 | b->legacy_mem->attr.name = "legacy_mem"; |
659 | b->legacy_mem->size = 1024*1024; | 675 | b->legacy_mem->size = 1024*1024; |
660 | b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; | 676 | b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; |
@@ -689,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b) | |||
689 | 705 | ||
690 | #ifdef HAVE_PCI_MMAP | 706 | #ifdef HAVE_PCI_MMAP |
691 | 707 | ||
692 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) | 708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, |
709 | enum pci_mmap_api mmap_api) | ||
693 | { | 710 | { |
694 | unsigned long nr, start, size; | 711 | unsigned long nr, start, size, pci_start; |
695 | 712 | ||
713 | if (pci_resource_len(pdev, resno) == 0) | ||
714 | return 0; | ||
696 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 715 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
697 | start = vma->vm_pgoff; | 716 | start = vma->vm_pgoff; |
698 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; | 717 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; |
699 | if (start < size && size - start >= nr) | 718 | pci_start = (mmap_api == PCI_MMAP_PROCFS) ? |
719 | pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; | ||
720 | if (start >= pci_start && start < pci_start + size && | ||
721 | start + nr <= pci_start + size) | ||
700 | return 1; | 722 | return 1; |
701 | WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", | ||
702 | current->comm, start, start+nr, pci_name(pdev), resno, size); | ||
703 | return 0; | 723 | return 0; |
704 | } | 724 | } |
705 | 725 | ||
@@ -718,7 +738,7 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
718 | { | 738 | { |
719 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, | 739 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, |
720 | struct device, kobj)); | 740 | struct device, kobj)); |
721 | struct resource *res = (struct resource *)attr->private; | 741 | struct resource *res = attr->private; |
722 | enum pci_mmap_state mmap_type; | 742 | enum pci_mmap_state mmap_type; |
723 | resource_size_t start, end; | 743 | resource_size_t start, end; |
724 | int i; | 744 | int i; |
@@ -729,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
729 | if (i >= PCI_ROM_RESOURCE) | 749 | if (i >= PCI_ROM_RESOURCE) |
730 | return -ENODEV; | 750 | return -ENODEV; |
731 | 751 | ||
732 | if (!pci_mmap_fits(pdev, i, vma)) | 752 | if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { |
753 | WARN(1, "process \"%s\" tried to map 0x%08lx bytes " | ||
754 | "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", | ||
755 | current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, | ||
756 | pci_name(pdev), i, | ||
757 | (u64)pci_resource_start(pdev, i), | ||
758 | (u64)pci_resource_len(pdev, i)); | ||
733 | return -EINVAL; | 759 | return -EINVAL; |
760 | } | ||
734 | 761 | ||
735 | /* pci_mmap_page_range() expects the same kind of entry as coming | 762 | /* pci_mmap_page_range() expects the same kind of entry as coming |
736 | * from /proc/bus/pci/ which is a "user visible" value. If this is | 763 | * from /proc/bus/pci/ which is a "user visible" value. If this is |
@@ -747,19 +774,85 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
747 | } | 774 | } |
748 | 775 | ||
749 | static int | 776 | static int |
750 | pci_mmap_resource_uc(struct kobject *kobj, struct bin_attribute *attr, | 777 | pci_mmap_resource_uc(struct file *filp, struct kobject *kobj, |
778 | struct bin_attribute *attr, | ||
751 | struct vm_area_struct *vma) | 779 | struct vm_area_struct *vma) |
752 | { | 780 | { |
753 | return pci_mmap_resource(kobj, attr, vma, 0); | 781 | return pci_mmap_resource(kobj, attr, vma, 0); |
754 | } | 782 | } |
755 | 783 | ||
756 | static int | 784 | static int |
757 | pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr, | 785 | pci_mmap_resource_wc(struct file *filp, struct kobject *kobj, |
786 | struct bin_attribute *attr, | ||
758 | struct vm_area_struct *vma) | 787 | struct vm_area_struct *vma) |
759 | { | 788 | { |
760 | return pci_mmap_resource(kobj, attr, vma, 1); | 789 | return pci_mmap_resource(kobj, attr, vma, 1); |
761 | } | 790 | } |
762 | 791 | ||
792 | static ssize_t | ||
793 | pci_resource_io(struct file *filp, struct kobject *kobj, | ||
794 | struct bin_attribute *attr, char *buf, | ||
795 | loff_t off, size_t count, bool write) | ||
796 | { | ||
797 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, | ||
798 | struct device, kobj)); | ||
799 | struct resource *res = attr->private; | ||
800 | unsigned long port = off; | ||
801 | int i; | ||
802 | |||
803 | for (i = 0; i < PCI_ROM_RESOURCE; i++) | ||
804 | if (res == &pdev->resource[i]) | ||
805 | break; | ||
806 | if (i >= PCI_ROM_RESOURCE) | ||
807 | return -ENODEV; | ||
808 | |||
809 | port += pci_resource_start(pdev, i); | ||
810 | |||
811 | if (port > pci_resource_end(pdev, i)) | ||
812 | return 0; | ||
813 | |||
814 | if (port + count - 1 > pci_resource_end(pdev, i)) | ||
815 | return -EINVAL; | ||
816 | |||
817 | switch (count) { | ||
818 | case 1: | ||
819 | if (write) | ||
820 | outb(*(u8 *)buf, port); | ||
821 | else | ||
822 | *(u8 *)buf = inb(port); | ||
823 | return 1; | ||
824 | case 2: | ||
825 | if (write) | ||
826 | outw(*(u16 *)buf, port); | ||
827 | else | ||
828 | *(u16 *)buf = inw(port); | ||
829 | return 2; | ||
830 | case 4: | ||
831 | if (write) | ||
832 | outl(*(u32 *)buf, port); | ||
833 | else | ||
834 | *(u32 *)buf = inl(port); | ||
835 | return 4; | ||
836 | } | ||
837 | return -EINVAL; | ||
838 | } | ||
839 | |||
840 | static ssize_t | ||
841 | pci_read_resource_io(struct file *filp, struct kobject *kobj, | ||
842 | struct bin_attribute *attr, char *buf, | ||
843 | loff_t off, size_t count) | ||
844 | { | ||
845 | return pci_resource_io(filp, kobj, attr, buf, off, count, false); | ||
846 | } | ||
847 | |||
848 | static ssize_t | ||
849 | pci_write_resource_io(struct file *filp, struct kobject *kobj, | ||
850 | struct bin_attribute *attr, char *buf, | ||
851 | loff_t off, size_t count) | ||
852 | { | ||
853 | return pci_resource_io(filp, kobj, attr, buf, off, count, true); | ||
854 | } | ||
855 | |||
763 | /** | 856 | /** |
764 | * pci_remove_resource_files - cleanup resource files | 857 | * pci_remove_resource_files - cleanup resource files |
765 | * @pdev: dev to cleanup | 858 | * @pdev: dev to cleanup |
@@ -800,6 +893,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) | |||
800 | if (res_attr) { | 893 | if (res_attr) { |
801 | char *res_attr_name = (char *)(res_attr + 1); | 894 | char *res_attr_name = (char *)(res_attr + 1); |
802 | 895 | ||
896 | sysfs_bin_attr_init(res_attr); | ||
803 | if (write_combine) { | 897 | if (write_combine) { |
804 | pdev->res_attr_wc[num] = res_attr; | 898 | pdev->res_attr_wc[num] = res_attr; |
805 | sprintf(res_attr_name, "resource%d_wc", num); | 899 | sprintf(res_attr_name, "resource%d_wc", num); |
@@ -809,6 +903,10 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) | |||
809 | sprintf(res_attr_name, "resource%d", num); | 903 | sprintf(res_attr_name, "resource%d", num); |
810 | res_attr->mmap = pci_mmap_resource_uc; | 904 | res_attr->mmap = pci_mmap_resource_uc; |
811 | } | 905 | } |
906 | if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { | ||
907 | res_attr->read = pci_read_resource_io; | ||
908 | res_attr->write = pci_write_resource_io; | ||
909 | } | ||
812 | res_attr->attr.name = res_attr_name; | 910 | res_attr->attr.name = res_attr_name; |
813 | res_attr->attr.mode = S_IRUSR | S_IWUSR; | 911 | res_attr->attr.mode = S_IRUSR | S_IWUSR; |
814 | res_attr->size = pci_resource_len(pdev, num); | 912 | res_attr->size = pci_resource_len(pdev, num); |
@@ -857,6 +955,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; } | |||
857 | 955 | ||
858 | /** | 956 | /** |
859 | * pci_write_rom - used to enable access to the PCI ROM display | 957 | * pci_write_rom - used to enable access to the PCI ROM display |
958 | * @filp: sysfs file | ||
860 | * @kobj: kernel object handle | 959 | * @kobj: kernel object handle |
861 | * @bin_attr: struct bin_attribute for this file | 960 | * @bin_attr: struct bin_attribute for this file |
862 | * @buf: user input | 961 | * @buf: user input |
@@ -866,7 +965,8 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; } | |||
866 | * writing anything except 0 enables it | 965 | * writing anything except 0 enables it |
867 | */ | 966 | */ |
868 | static ssize_t | 967 | static ssize_t |
869 | pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr, | 968 | pci_write_rom(struct file *filp, struct kobject *kobj, |
969 | struct bin_attribute *bin_attr, | ||
870 | char *buf, loff_t off, size_t count) | 970 | char *buf, loff_t off, size_t count) |
871 | { | 971 | { |
872 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj)); | 972 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj)); |
@@ -881,6 +981,7 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
881 | 981 | ||
882 | /** | 982 | /** |
883 | * pci_read_rom - read a PCI ROM | 983 | * pci_read_rom - read a PCI ROM |
984 | * @filp: sysfs file | ||
884 | * @kobj: kernel object handle | 985 | * @kobj: kernel object handle |
885 | * @bin_attr: struct bin_attribute for this file | 986 | * @bin_attr: struct bin_attribute for this file |
886 | * @buf: where to put the data we read from the ROM | 987 | * @buf: where to put the data we read from the ROM |
@@ -891,7 +992,8 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
891 | * device corresponding to @kobj. | 992 | * device corresponding to @kobj. |
892 | */ | 993 | */ |
893 | static ssize_t | 994 | static ssize_t |
894 | pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr, | 995 | pci_read_rom(struct file *filp, struct kobject *kobj, |
996 | struct bin_attribute *bin_attr, | ||
895 | char *buf, loff_t off, size_t count) | 997 | char *buf, loff_t off, size_t count) |
896 | { | 998 | { |
897 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj)); | 999 | struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj)); |
@@ -956,7 +1058,12 @@ static ssize_t reset_store(struct device *dev, | |||
956 | 1058 | ||
957 | if (val != 1) | 1059 | if (val != 1) |
958 | return -EINVAL; | 1060 | return -EINVAL; |
959 | return pci_reset_function(pdev); | 1061 | |
1062 | result = pci_reset_function(pdev); | ||
1063 | if (result < 0) | ||
1064 | return result; | ||
1065 | |||
1066 | return count; | ||
960 | } | 1067 | } |
961 | 1068 | ||
962 | static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); | 1069 | static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); |
@@ -972,6 +1079,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) | |||
972 | if (!attr) | 1079 | if (!attr) |
973 | return -ENOMEM; | 1080 | return -ENOMEM; |
974 | 1081 | ||
1082 | sysfs_bin_attr_init(attr); | ||
975 | attr->size = dev->vpd->len; | 1083 | attr->size = dev->vpd->len; |
976 | attr->attr.name = "vpd"; | 1084 | attr->attr.name = "vpd"; |
977 | attr->attr.mode = S_IRUSR | S_IWUSR; | 1085 | attr->attr.mode = S_IRUSR | S_IWUSR; |
@@ -1038,9 +1146,10 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) | |||
1038 | retval = -ENOMEM; | 1146 | retval = -ENOMEM; |
1039 | goto err_resource_files; | 1147 | goto err_resource_files; |
1040 | } | 1148 | } |
1149 | sysfs_bin_attr_init(attr); | ||
1041 | attr->size = rom_size; | 1150 | attr->size = rom_size; |
1042 | attr->attr.name = "rom"; | 1151 | attr->attr.name = "rom"; |
1043 | attr->attr.mode = S_IRUSR; | 1152 | attr->attr.mode = S_IRUSR | S_IWUSR; |
1044 | attr->read = pci_read_rom; | 1153 | attr->read = pci_read_rom; |
1045 | attr->write = pci_write_rom; | 1154 | attr->write = pci_write_rom; |
1046 | retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); | 1155 | retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); |
@@ -1067,6 +1176,8 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) | |||
1067 | if (retval) | 1176 | if (retval) |
1068 | goto err_vga_file; | 1177 | goto err_vga_file; |
1069 | 1178 | ||
1179 | pci_create_firmware_label_files(pdev); | ||
1180 | |||
1070 | return 0; | 1181 | return 0; |
1071 | 1182 | ||
1072 | err_vga_file: | 1183 | err_vga_file: |
@@ -1134,6 +1245,9 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) | |||
1134 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); | 1245 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); |
1135 | kfree(pdev->rom_attr); | 1246 | kfree(pdev->rom_attr); |
1136 | } | 1247 | } |
1248 | |||
1249 | pci_remove_firmware_label_files(pdev); | ||
1250 | |||
1137 | } | 1251 | } |
1138 | 1252 | ||
1139 | static int __init pci_sysfs_init(void) | 1253 | static int __init pci_sysfs_init(void) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 315fea47e784..b714d787bddd 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/pm.h> | 14 | #include <linux/pm.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
17 | #include <linux/string.h> | 18 | #include <linux/string.h> |
@@ -19,8 +20,8 @@ | |||
19 | #include <linux/pci-aspm.h> | 20 | #include <linux/pci-aspm.h> |
20 | #include <linux/pm_wakeup.h> | 21 | #include <linux/pm_wakeup.h> |
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
22 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/pm_runtime.h> | ||
24 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
25 | #include "pci.h" | 26 | #include "pci.h" |
26 | 27 | ||
@@ -29,8 +30,27 @@ const char *pci_power_names[] = { | |||
29 | }; | 30 | }; |
30 | EXPORT_SYMBOL_GPL(pci_power_names); | 31 | EXPORT_SYMBOL_GPL(pci_power_names); |
31 | 32 | ||
33 | int isa_dma_bridge_buggy; | ||
34 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
35 | |||
36 | int pci_pci_problems; | ||
37 | EXPORT_SYMBOL(pci_pci_problems); | ||
38 | |||
32 | unsigned int pci_pm_d3_delay; | 39 | unsigned int pci_pm_d3_delay; |
33 | 40 | ||
41 | static void pci_pme_list_scan(struct work_struct *work); | ||
42 | |||
43 | static LIST_HEAD(pci_pme_list); | ||
44 | static DEFINE_MUTEX(pci_pme_list_mutex); | ||
45 | static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); | ||
46 | |||
47 | struct pci_pme_device { | ||
48 | struct list_head list; | ||
49 | struct pci_dev *dev; | ||
50 | }; | ||
51 | |||
52 | #define PME_TIMEOUT 1000 /* How long between PME checks */ | ||
53 | |||
34 | static void pci_dev_d3_sleep(struct pci_dev *dev) | 54 | static void pci_dev_d3_sleep(struct pci_dev *dev) |
35 | { | 55 | { |
36 | unsigned int delay = dev->d3_delay; | 56 | unsigned int delay = dev->d3_delay; |
@@ -297,6 +317,49 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap) | |||
297 | } | 317 | } |
298 | EXPORT_SYMBOL_GPL(pci_find_ext_capability); | 318 | EXPORT_SYMBOL_GPL(pci_find_ext_capability); |
299 | 319 | ||
320 | /** | ||
321 | * pci_bus_find_ext_capability - find an extended capability | ||
322 | * @bus: the PCI bus to query | ||
323 | * @devfn: PCI device to query | ||
324 | * @cap: capability code | ||
325 | * | ||
326 | * Like pci_find_ext_capability() but works for pci devices that do not have a | ||
327 | * pci_dev structure set up yet. | ||
328 | * | ||
329 | * Returns the address of the requested capability structure within the | ||
330 | * device's PCI configuration space or 0 in case the device does not | ||
331 | * support it. | ||
332 | */ | ||
333 | int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn, | ||
334 | int cap) | ||
335 | { | ||
336 | u32 header; | ||
337 | int ttl; | ||
338 | int pos = PCI_CFG_SPACE_SIZE; | ||
339 | |||
340 | /* minimum 8 bytes per capability */ | ||
341 | ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; | ||
342 | |||
343 | if (!pci_bus_read_config_dword(bus, devfn, pos, &header)) | ||
344 | return 0; | ||
345 | if (header == 0xffffffff || header == 0) | ||
346 | return 0; | ||
347 | |||
348 | while (ttl-- > 0) { | ||
349 | if (PCI_EXT_CAP_ID(header) == cap) | ||
350 | return pos; | ||
351 | |||
352 | pos = PCI_EXT_CAP_NEXT(header); | ||
353 | if (pos < PCI_CFG_SPACE_SIZE) | ||
354 | break; | ||
355 | |||
356 | if (!pci_bus_read_config_dword(bus, devfn, pos, &header)) | ||
357 | break; | ||
358 | } | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
300 | static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) | 363 | static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) |
301 | { | 364 | { |
302 | int rc, ttl = PCI_FIND_CAP_TTL; | 365 | int rc, ttl = PCI_FIND_CAP_TTL; |
@@ -380,10 +443,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
380 | { | 443 | { |
381 | const struct pci_bus *bus = dev->bus; | 444 | const struct pci_bus *bus = dev->bus; |
382 | int i; | 445 | int i; |
383 | struct resource *best = NULL; | 446 | struct resource *best = NULL, *r; |
384 | 447 | ||
385 | for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 448 | pci_bus_for_each_resource(bus, r, i) { |
386 | struct resource *r = bus->resource[i]; | ||
387 | if (!r) | 449 | if (!r) |
388 | continue; | 450 | continue; |
389 | if (res->start && !(res->start >= r->start && res->end <= r->end)) | 451 | if (res->start && !(res->start >= r->start && res->end <= r->end)) |
@@ -457,6 +519,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
457 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; | 519 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; |
458 | } | 520 | } |
459 | 521 | ||
522 | static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) | ||
523 | { | ||
524 | return pci_platform_pm ? | ||
525 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; | ||
526 | } | ||
527 | |||
460 | /** | 528 | /** |
461 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of | 529 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
462 | * given PCI device | 530 | * given PCI device |
@@ -624,7 +692,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) | |||
624 | */ | 692 | */ |
625 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) | 693 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) |
626 | { | 694 | { |
627 | return state > PCI_D0 ? | 695 | return state >= PCI_D0 ? |
628 | pci_platform_power_transition(dev, state) : -EINVAL; | 696 | pci_platform_power_transition(dev, state) : -EINVAL; |
629 | } | 697 | } |
630 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); | 698 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); |
@@ -661,10 +729,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
661 | */ | 729 | */ |
662 | return 0; | 730 | return 0; |
663 | 731 | ||
664 | /* Check if we're already there */ | ||
665 | if (dev->current_state == state) | ||
666 | return 0; | ||
667 | |||
668 | __pci_start_power_transition(dev, state); | 732 | __pci_start_power_transition(dev, state); |
669 | 733 | ||
670 | /* This device is quirked not to be put into D3, so | 734 | /* This device is quirked not to be put into D3, so |
@@ -873,14 +937,13 @@ pci_save_state(struct pci_dev *dev) | |||
873 | * pci_restore_state - Restore the saved state of a PCI device | 937 | * pci_restore_state - Restore the saved state of a PCI device |
874 | * @dev: - PCI device that we're dealing with | 938 | * @dev: - PCI device that we're dealing with |
875 | */ | 939 | */ |
876 | int | 940 | void pci_restore_state(struct pci_dev *dev) |
877 | pci_restore_state(struct pci_dev *dev) | ||
878 | { | 941 | { |
879 | int i; | 942 | int i; |
880 | u32 val; | 943 | u32 val; |
881 | 944 | ||
882 | if (!dev->state_saved) | 945 | if (!dev->state_saved) |
883 | return 0; | 946 | return; |
884 | 947 | ||
885 | /* PCI Express register must be restored first */ | 948 | /* PCI Express register must be restored first */ |
886 | pci_restore_pcie_state(dev); | 949 | pci_restore_pcie_state(dev); |
@@ -904,8 +967,6 @@ pci_restore_state(struct pci_dev *dev) | |||
904 | pci_restore_iov_state(dev); | 967 | pci_restore_iov_state(dev); |
905 | 968 | ||
906 | dev->state_saved = false; | 969 | dev->state_saved = false; |
907 | |||
908 | return 0; | ||
909 | } | 970 | } |
910 | 971 | ||
911 | static int do_pci_enable_device(struct pci_dev *dev, int bars) | 972 | static int do_pci_enable_device(struct pci_dev *dev, int bars) |
@@ -943,6 +1004,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev, | |||
943 | int err; | 1004 | int err; |
944 | int i, bars = 0; | 1005 | int i, bars = 0; |
945 | 1006 | ||
1007 | /* | ||
1008 | * Power state could be unknown at this point, either due to a fresh | ||
1009 | * boot or a device removal call. So get the current power state | ||
1010 | * so that things like MSI message writing will behave as expected | ||
1011 | * (e.g. if the device really is in D0 at enable time). | ||
1012 | */ | ||
1013 | if (dev->pm_cap) { | ||
1014 | u16 pmcsr; | ||
1015 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
1016 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | ||
1017 | } | ||
1018 | |||
946 | if (atomic_add_return(1, &dev->enable_cnt) > 1) | 1019 | if (atomic_add_return(1, &dev->enable_cnt) > 1) |
947 | return 0; /* already enabled */ | 1020 | return 0; /* already enabled */ |
948 | 1021 | ||
@@ -1142,7 +1215,7 @@ void pci_disable_enabled_device(struct pci_dev *dev) | |||
1142 | * anymore. This only involves disabling PCI bus-mastering, if active. | 1215 | * anymore. This only involves disabling PCI bus-mastering, if active. |
1143 | * | 1216 | * |
1144 | * Note we don't actually disable the device until all callers of | 1217 | * Note we don't actually disable the device until all callers of |
1145 | * pci_device_enable() have called pci_device_disable(). | 1218 | * pci_enable_device() have called pci_disable_device(). |
1146 | */ | 1219 | */ |
1147 | void | 1220 | void |
1148 | pci_disable_device(struct pci_dev *dev) | 1221 | pci_disable_device(struct pci_dev *dev) |
@@ -1190,6 +1263,68 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | |||
1190 | } | 1263 | } |
1191 | 1264 | ||
1192 | /** | 1265 | /** |
1266 | * pci_check_pme_status - Check if given device has generated PME. | ||
1267 | * @dev: Device to check. | ||
1268 | * | ||
1269 | * Check the PME status of the device and if set, clear it and clear PME enable | ||
1270 | * (if set). Return 'true' if PME status and PME enable were both set or | ||
1271 | * 'false' otherwise. | ||
1272 | */ | ||
1273 | bool pci_check_pme_status(struct pci_dev *dev) | ||
1274 | { | ||
1275 | int pmcsr_pos; | ||
1276 | u16 pmcsr; | ||
1277 | bool ret = false; | ||
1278 | |||
1279 | if (!dev->pm_cap) | ||
1280 | return false; | ||
1281 | |||
1282 | pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; | ||
1283 | pci_read_config_word(dev, pmcsr_pos, &pmcsr); | ||
1284 | if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) | ||
1285 | return false; | ||
1286 | |||
1287 | /* Clear PME status. */ | ||
1288 | pmcsr |= PCI_PM_CTRL_PME_STATUS; | ||
1289 | if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { | ||
1290 | /* Disable PME to avoid interrupt flood. */ | ||
1291 | pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; | ||
1292 | ret = true; | ||
1293 | } | ||
1294 | |||
1295 | pci_write_config_word(dev, pmcsr_pos, pmcsr); | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
1299 | |||
1300 | /** | ||
1301 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. | ||
1302 | * @dev: Device to handle. | ||
1303 | * @ign: Ignored. | ||
1304 | * | ||
1305 | * Check if @dev has generated PME and queue a resume request for it in that | ||
1306 | * case. | ||
1307 | */ | ||
1308 | static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | ||
1309 | { | ||
1310 | if (pci_check_pme_status(dev)) { | ||
1311 | pci_wakeup_event(dev); | ||
1312 | pm_request_resume(&dev->dev); | ||
1313 | } | ||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | /** | ||
1318 | * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. | ||
1319 | * @bus: Top bus of the subtree to walk. | ||
1320 | */ | ||
1321 | void pci_pme_wakeup_bus(struct pci_bus *bus) | ||
1322 | { | ||
1323 | if (bus) | ||
1324 | pci_walk_bus(bus, pci_pme_wakeup, NULL); | ||
1325 | } | ||
1326 | |||
1327 | /** | ||
1193 | * pci_pme_capable - check the capability of PCI device to generate PME# | 1328 | * pci_pme_capable - check the capability of PCI device to generate PME# |
1194 | * @dev: PCI device to handle. | 1329 | * @dev: PCI device to handle. |
1195 | * @state: PCI state from which device will issue PME#. | 1330 | * @state: PCI state from which device will issue PME#. |
@@ -1202,6 +1337,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) | |||
1202 | return !!(dev->pme_support & (1 << state)); | 1337 | return !!(dev->pme_support & (1 << state)); |
1203 | } | 1338 | } |
1204 | 1339 | ||
1340 | static void pci_pme_list_scan(struct work_struct *work) | ||
1341 | { | ||
1342 | struct pci_pme_device *pme_dev; | ||
1343 | |||
1344 | mutex_lock(&pci_pme_list_mutex); | ||
1345 | if (!list_empty(&pci_pme_list)) { | ||
1346 | list_for_each_entry(pme_dev, &pci_pme_list, list) | ||
1347 | pci_pme_wakeup(pme_dev->dev, NULL); | ||
1348 | schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); | ||
1349 | } | ||
1350 | mutex_unlock(&pci_pme_list_mutex); | ||
1351 | } | ||
1352 | |||
1353 | /** | ||
1354 | * pci_external_pme - is a device an external PCI PME source? | ||
1355 | * @dev: PCI device to check | ||
1356 | * | ||
1357 | */ | ||
1358 | |||
1359 | static bool pci_external_pme(struct pci_dev *dev) | ||
1360 | { | ||
1361 | if (pci_is_pcie(dev) || dev->bus->number == 0) | ||
1362 | return false; | ||
1363 | return true; | ||
1364 | } | ||
1365 | |||
1205 | /** | 1366 | /** |
1206 | * pci_pme_active - enable or disable PCI device's PME# function | 1367 | * pci_pme_active - enable or disable PCI device's PME# function |
1207 | * @dev: PCI device to handle. | 1368 | * @dev: PCI device to handle. |
@@ -1225,14 +1386,53 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1225 | 1386 | ||
1226 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); | 1387 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
1227 | 1388 | ||
1389 | /* PCI (as opposed to PCIe) PME requires that the device have | ||
1390 | its PME# line hooked up correctly. Not all hardware vendors | ||
1391 | do this, so the PME never gets delivered and the device | ||
1392 | remains asleep. The easiest way around this is to | ||
1393 | periodically walk the list of suspended devices and check | ||
1394 | whether any have their PME flag set. The assumption is that | ||
1395 | we'll wake up often enough anyway that this won't be a huge | ||
1396 | hit, and the power savings from the devices will still be a | ||
1397 | win. */ | ||
1398 | |||
1399 | if (pci_external_pme(dev)) { | ||
1400 | struct pci_pme_device *pme_dev; | ||
1401 | if (enable) { | ||
1402 | pme_dev = kmalloc(sizeof(struct pci_pme_device), | ||
1403 | GFP_KERNEL); | ||
1404 | if (!pme_dev) | ||
1405 | goto out; | ||
1406 | pme_dev->dev = dev; | ||
1407 | mutex_lock(&pci_pme_list_mutex); | ||
1408 | list_add(&pme_dev->list, &pci_pme_list); | ||
1409 | if (list_is_singular(&pci_pme_list)) | ||
1410 | schedule_delayed_work(&pci_pme_work, | ||
1411 | msecs_to_jiffies(PME_TIMEOUT)); | ||
1412 | mutex_unlock(&pci_pme_list_mutex); | ||
1413 | } else { | ||
1414 | mutex_lock(&pci_pme_list_mutex); | ||
1415 | list_for_each_entry(pme_dev, &pci_pme_list, list) { | ||
1416 | if (pme_dev->dev == dev) { | ||
1417 | list_del(&pme_dev->list); | ||
1418 | kfree(pme_dev); | ||
1419 | break; | ||
1420 | } | ||
1421 | } | ||
1422 | mutex_unlock(&pci_pme_list_mutex); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | out: | ||
1228 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", | 1427 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", |
1229 | enable ? "enabled" : "disabled"); | 1428 | enable ? "enabled" : "disabled"); |
1230 | } | 1429 | } |
1231 | 1430 | ||
1232 | /** | 1431 | /** |
1233 | * pci_enable_wake - enable PCI device as wakeup event source | 1432 | * __pci_enable_wake - enable PCI device as wakeup event source |
1234 | * @dev: PCI device affected | 1433 | * @dev: PCI device affected |
1235 | * @state: PCI state from which device will issue wakeup events | 1434 | * @state: PCI state from which device will issue wakeup events |
1435 | * @runtime: True if the events are to be generated at run time | ||
1236 | * @enable: True to enable event generation; false to disable | 1436 | * @enable: True to enable event generation; false to disable |
1237 | * | 1437 | * |
1238 | * This enables the device as a wakeup event source, or disables it. | 1438 | * This enables the device as a wakeup event source, or disables it. |
@@ -1248,11 +1448,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1248 | * Error code depending on the platform is returned if both the platform and | 1448 | * Error code depending on the platform is returned if both the platform and |
1249 | * the native mechanism fail to enable the generation of wake-up events | 1449 | * the native mechanism fail to enable the generation of wake-up events |
1250 | */ | 1450 | */ |
1251 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1451 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
1452 | bool runtime, bool enable) | ||
1252 | { | 1453 | { |
1253 | int ret = 0; | 1454 | int ret = 0; |
1254 | 1455 | ||
1255 | if (enable && !device_may_wakeup(&dev->dev)) | 1456 | if (enable && !runtime && !device_may_wakeup(&dev->dev)) |
1256 | return -EINVAL; | 1457 | return -EINVAL; |
1257 | 1458 | ||
1258 | /* Don't do the same thing twice in a row for one device. */ | 1459 | /* Don't do the same thing twice in a row for one device. */ |
@@ -1272,19 +1473,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | |||
1272 | pci_pme_active(dev, true); | 1473 | pci_pme_active(dev, true); |
1273 | else | 1474 | else |
1274 | ret = 1; | 1475 | ret = 1; |
1275 | error = platform_pci_sleep_wake(dev, true); | 1476 | error = runtime ? platform_pci_run_wake(dev, true) : |
1477 | platform_pci_sleep_wake(dev, true); | ||
1276 | if (ret) | 1478 | if (ret) |
1277 | ret = error; | 1479 | ret = error; |
1278 | if (!ret) | 1480 | if (!ret) |
1279 | dev->wakeup_prepared = true; | 1481 | dev->wakeup_prepared = true; |
1280 | } else { | 1482 | } else { |
1281 | platform_pci_sleep_wake(dev, false); | 1483 | if (runtime) |
1484 | platform_pci_run_wake(dev, false); | ||
1485 | else | ||
1486 | platform_pci_sleep_wake(dev, false); | ||
1282 | pci_pme_active(dev, false); | 1487 | pci_pme_active(dev, false); |
1283 | dev->wakeup_prepared = false; | 1488 | dev->wakeup_prepared = false; |
1284 | } | 1489 | } |
1285 | 1490 | ||
1286 | return ret; | 1491 | return ret; |
1287 | } | 1492 | } |
1493 | EXPORT_SYMBOL(__pci_enable_wake); | ||
1288 | 1494 | ||
1289 | /** | 1495 | /** |
1290 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold | 1496 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold |
@@ -1385,7 +1591,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev) | |||
1385 | * pci_back_from_sleep - turn PCI device on during system-wide transition into working state | 1591 | * pci_back_from_sleep - turn PCI device on during system-wide transition into working state |
1386 | * @dev: Device to handle. | 1592 | * @dev: Device to handle. |
1387 | * | 1593 | * |
1388 | * Disable device's sytem wake-up capability and put it into D0. | 1594 | * Disable device's system wake-up capability and put it into D0. |
1389 | */ | 1595 | */ |
1390 | int pci_back_from_sleep(struct pci_dev *dev) | 1596 | int pci_back_from_sleep(struct pci_dev *dev) |
1391 | { | 1597 | { |
@@ -1394,6 +1600,66 @@ int pci_back_from_sleep(struct pci_dev *dev) | |||
1394 | } | 1600 | } |
1395 | 1601 | ||
1396 | /** | 1602 | /** |
1603 | * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. | ||
1604 | * @dev: PCI device being suspended. | ||
1605 | * | ||
1606 | * Prepare @dev to generate wake-up events at run time and put it into a low | ||
1607 | * power state. | ||
1608 | */ | ||
1609 | int pci_finish_runtime_suspend(struct pci_dev *dev) | ||
1610 | { | ||
1611 | pci_power_t target_state = pci_target_state(dev); | ||
1612 | int error; | ||
1613 | |||
1614 | if (target_state == PCI_POWER_ERROR) | ||
1615 | return -EIO; | ||
1616 | |||
1617 | __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); | ||
1618 | |||
1619 | error = pci_set_power_state(dev, target_state); | ||
1620 | |||
1621 | if (error) | ||
1622 | __pci_enable_wake(dev, target_state, true, false); | ||
1623 | |||
1624 | return error; | ||
1625 | } | ||
1626 | |||
1627 | /** | ||
1628 | * pci_dev_run_wake - Check if device can generate run-time wake-up events. | ||
1629 | * @dev: Device to check. | ||
1630 | * | ||
1631 | * Return true if the device itself is cabable of generating wake-up events | ||
1632 | * (through the platform or using the native PCIe PME) or if the device supports | ||
1633 | * PME and one of its upstream bridges can generate wake-up events. | ||
1634 | */ | ||
1635 | bool pci_dev_run_wake(struct pci_dev *dev) | ||
1636 | { | ||
1637 | struct pci_bus *bus = dev->bus; | ||
1638 | |||
1639 | if (device_run_wake(&dev->dev)) | ||
1640 | return true; | ||
1641 | |||
1642 | if (!dev->pme_support) | ||
1643 | return false; | ||
1644 | |||
1645 | while (bus->parent) { | ||
1646 | struct pci_dev *bridge = bus->self; | ||
1647 | |||
1648 | if (device_run_wake(&bridge->dev)) | ||
1649 | return true; | ||
1650 | |||
1651 | bus = bus->parent; | ||
1652 | } | ||
1653 | |||
1654 | /* We have reached the root bus. */ | ||
1655 | if (bus->bridge) | ||
1656 | return device_run_wake(bus->bridge); | ||
1657 | |||
1658 | return false; | ||
1659 | } | ||
1660 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); | ||
1661 | |||
1662 | /** | ||
1397 | * pci_pm_init - Initialize PM functions of given PCI device | 1663 | * pci_pm_init - Initialize PM functions of given PCI device |
1398 | * @dev: PCI device to handle. | 1664 | * @dev: PCI device to handle. |
1399 | */ | 1665 | */ |
@@ -1402,7 +1668,10 @@ void pci_pm_init(struct pci_dev *dev) | |||
1402 | int pm; | 1668 | int pm; |
1403 | u16 pmc; | 1669 | u16 pmc; |
1404 | 1670 | ||
1671 | pm_runtime_forbid(&dev->dev); | ||
1672 | device_enable_async_suspend(&dev->dev); | ||
1405 | dev->wakeup_prepared = false; | 1673 | dev->wakeup_prepared = false; |
1674 | |||
1406 | dev->pm_cap = 0; | 1675 | dev->pm_cap = 0; |
1407 | 1676 | ||
1408 | /* find PCI PM capability in list */ | 1677 | /* find PCI PM capability in list */ |
@@ -1450,7 +1719,6 @@ void pci_pm_init(struct pci_dev *dev) | |||
1450 | * let the user space enable it to wake up the system as needed. | 1719 | * let the user space enable it to wake up the system as needed. |
1451 | */ | 1720 | */ |
1452 | device_set_wakeup_capable(&dev->dev, true); | 1721 | device_set_wakeup_capable(&dev->dev, true); |
1453 | device_set_wakeup_enable(&dev->dev, false); | ||
1454 | /* Disable the PME# generation functionality */ | 1722 | /* Disable the PME# generation functionality */ |
1455 | pci_pme_active(dev, false); | 1723 | pci_pme_active(dev, false); |
1456 | } else { | 1724 | } else { |
@@ -1474,7 +1742,6 @@ void platform_pci_wakeup_init(struct pci_dev *dev) | |||
1474 | return; | 1742 | return; |
1475 | 1743 | ||
1476 | device_set_wakeup_capable(&dev->dev, true); | 1744 | device_set_wakeup_capable(&dev->dev, true); |
1477 | device_set_wakeup_enable(&dev->dev, false); | ||
1478 | platform_pci_sleep_wake(dev, false); | 1745 | platform_pci_sleep_wake(dev, false); |
1479 | } | 1746 | } |
1480 | 1747 | ||
@@ -2113,51 +2380,19 @@ void pci_msi_off(struct pci_dev *dev) | |||
2113 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 2380 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
2114 | } | 2381 | } |
2115 | } | 2382 | } |
2383 | EXPORT_SYMBOL_GPL(pci_msi_off); | ||
2116 | 2384 | ||
2117 | #ifndef HAVE_ARCH_PCI_SET_DMA_MASK | ||
2118 | /* | ||
2119 | * These can be overridden by arch-specific implementations | ||
2120 | */ | ||
2121 | int | ||
2122 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) | ||
2123 | { | ||
2124 | if (!pci_dma_supported(dev, mask)) | ||
2125 | return -EIO; | ||
2126 | |||
2127 | dev->dma_mask = mask; | ||
2128 | dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask)); | ||
2129 | |||
2130 | return 0; | ||
2131 | } | ||
2132 | |||
2133 | int | ||
2134 | pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | ||
2135 | { | ||
2136 | if (!pci_dma_supported(dev, mask)) | ||
2137 | return -EIO; | ||
2138 | |||
2139 | dev->dev.coherent_dma_mask = mask; | ||
2140 | dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask)); | ||
2141 | |||
2142 | return 0; | ||
2143 | } | ||
2144 | #endif | ||
2145 | |||
2146 | #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE | ||
2147 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) | 2385 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) |
2148 | { | 2386 | { |
2149 | return dma_set_max_seg_size(&dev->dev, size); | 2387 | return dma_set_max_seg_size(&dev->dev, size); |
2150 | } | 2388 | } |
2151 | EXPORT_SYMBOL(pci_set_dma_max_seg_size); | 2389 | EXPORT_SYMBOL(pci_set_dma_max_seg_size); |
2152 | #endif | ||
2153 | 2390 | ||
2154 | #ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY | ||
2155 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) | 2391 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) |
2156 | { | 2392 | { |
2157 | return dma_set_seg_boundary(&dev->dev, mask); | 2393 | return dma_set_seg_boundary(&dev->dev, mask); |
2158 | } | 2394 | } |
2159 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); | 2395 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); |
2160 | #endif | ||
2161 | 2396 | ||
2162 | static int pcie_flr(struct pci_dev *dev, int probe) | 2397 | static int pcie_flr(struct pci_dev *dev, int probe) |
2163 | { | 2398 | { |
@@ -2304,7 +2539,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
2304 | if (!probe) { | 2539 | if (!probe) { |
2305 | pci_block_user_cfg_access(dev); | 2540 | pci_block_user_cfg_access(dev); |
2306 | /* block PM suspend, driver probe, etc. */ | 2541 | /* block PM suspend, driver probe, etc. */ |
2307 | down(&dev->dev.sem); | 2542 | device_lock(&dev->dev); |
2308 | } | 2543 | } |
2309 | 2544 | ||
2310 | rc = pci_dev_specific_reset(dev, probe); | 2545 | rc = pci_dev_specific_reset(dev, probe); |
@@ -2326,7 +2561,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
2326 | rc = pci_parent_bus_reset(dev, probe); | 2561 | rc = pci_parent_bus_reset(dev, probe); |
2327 | done: | 2562 | done: |
2328 | if (!probe) { | 2563 | if (!probe) { |
2329 | up(&dev->dev.sem); | 2564 | device_unlock(&dev->dev); |
2330 | pci_unblock_user_cfg_access(dev); | 2565 | pci_unblock_user_cfg_access(dev); |
2331 | } | 2566 | } |
2332 | 2567 | ||
@@ -2421,18 +2656,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function); | |||
2421 | */ | 2656 | */ |
2422 | int pcix_get_max_mmrbc(struct pci_dev *dev) | 2657 | int pcix_get_max_mmrbc(struct pci_dev *dev) |
2423 | { | 2658 | { |
2424 | int err, cap; | 2659 | int cap; |
2425 | u32 stat; | 2660 | u32 stat; |
2426 | 2661 | ||
2427 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2662 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2428 | if (!cap) | 2663 | if (!cap) |
2429 | return -EINVAL; | 2664 | return -EINVAL; |
2430 | 2665 | ||
2431 | err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); | 2666 | if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) |
2432 | if (err) | ||
2433 | return -EINVAL; | 2667 | return -EINVAL; |
2434 | 2668 | ||
2435 | return (stat & PCI_X_STATUS_MAX_READ) >> 12; | 2669 | return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); |
2436 | } | 2670 | } |
2437 | EXPORT_SYMBOL(pcix_get_max_mmrbc); | 2671 | EXPORT_SYMBOL(pcix_get_max_mmrbc); |
2438 | 2672 | ||
@@ -2445,18 +2679,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); | |||
2445 | */ | 2679 | */ |
2446 | int pcix_get_mmrbc(struct pci_dev *dev) | 2680 | int pcix_get_mmrbc(struct pci_dev *dev) |
2447 | { | 2681 | { |
2448 | int ret, cap; | 2682 | int cap; |
2449 | u32 cmd; | 2683 | u16 cmd; |
2450 | 2684 | ||
2451 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2685 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2452 | if (!cap) | 2686 | if (!cap) |
2453 | return -EINVAL; | 2687 | return -EINVAL; |
2454 | 2688 | ||
2455 | ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); | 2689 | if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) |
2456 | if (!ret) | 2690 | return -EINVAL; |
2457 | ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); | ||
2458 | 2691 | ||
2459 | return ret; | 2692 | return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); |
2460 | } | 2693 | } |
2461 | EXPORT_SYMBOL(pcix_get_mmrbc); | 2694 | EXPORT_SYMBOL(pcix_get_mmrbc); |
2462 | 2695 | ||
@@ -2471,28 +2704,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc); | |||
2471 | */ | 2704 | */ |
2472 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | 2705 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) |
2473 | { | 2706 | { |
2474 | int cap, err = -EINVAL; | 2707 | int cap; |
2475 | u32 stat, cmd, v, o; | 2708 | u32 stat, v, o; |
2709 | u16 cmd; | ||
2476 | 2710 | ||
2477 | if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) | 2711 | if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) |
2478 | goto out; | 2712 | return -EINVAL; |
2479 | 2713 | ||
2480 | v = ffs(mmrbc) - 10; | 2714 | v = ffs(mmrbc) - 10; |
2481 | 2715 | ||
2482 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2716 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2483 | if (!cap) | 2717 | if (!cap) |
2484 | goto out; | 2718 | return -EINVAL; |
2485 | 2719 | ||
2486 | err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); | 2720 | if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) |
2487 | if (err) | 2721 | return -EINVAL; |
2488 | goto out; | ||
2489 | 2722 | ||
2490 | if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) | 2723 | if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) |
2491 | return -E2BIG; | 2724 | return -E2BIG; |
2492 | 2725 | ||
2493 | err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); | 2726 | if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) |
2494 | if (err) | 2727 | return -EINVAL; |
2495 | goto out; | ||
2496 | 2728 | ||
2497 | o = (cmd & PCI_X_CMD_MAX_READ) >> 2; | 2729 | o = (cmd & PCI_X_CMD_MAX_READ) >> 2; |
2498 | if (o != v) { | 2730 | if (o != v) { |
@@ -2502,10 +2734,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | |||
2502 | 2734 | ||
2503 | cmd &= ~PCI_X_CMD_MAX_READ; | 2735 | cmd &= ~PCI_X_CMD_MAX_READ; |
2504 | cmd |= v << 2; | 2736 | cmd |= v << 2; |
2505 | err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd); | 2737 | if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) |
2738 | return -EIO; | ||
2506 | } | 2739 | } |
2507 | out: | 2740 | return 0; |
2508 | return err; | ||
2509 | } | 2741 | } |
2510 | EXPORT_SYMBOL(pcix_set_mmrbc); | 2742 | EXPORT_SYMBOL(pcix_set_mmrbc); |
2511 | 2743 | ||
@@ -2527,7 +2759,7 @@ int pcie_get_readrq(struct pci_dev *dev) | |||
2527 | 2759 | ||
2528 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | 2760 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); |
2529 | if (!ret) | 2761 | if (!ret) |
2530 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); | 2762 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); |
2531 | 2763 | ||
2532 | return ret; | 2764 | return ret; |
2533 | } | 2765 | } |
@@ -2615,6 +2847,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | |||
2615 | return 0; | 2847 | return 0; |
2616 | } | 2848 | } |
2617 | 2849 | ||
2850 | /* Some architectures require additional programming to enable VGA */ | ||
2851 | static arch_set_vga_state_t arch_set_vga_state; | ||
2852 | |||
2853 | void __init pci_register_set_vga_state(arch_set_vga_state_t func) | ||
2854 | { | ||
2855 | arch_set_vga_state = func; /* NULL disables */ | ||
2856 | } | ||
2857 | |||
2858 | static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, | ||
2859 | unsigned int command_bits, bool change_bridge) | ||
2860 | { | ||
2861 | if (arch_set_vga_state) | ||
2862 | return arch_set_vga_state(dev, decode, command_bits, | ||
2863 | change_bridge); | ||
2864 | return 0; | ||
2865 | } | ||
2866 | |||
2618 | /** | 2867 | /** |
2619 | * pci_set_vga_state - set VGA decode state on device and parents if requested | 2868 | * pci_set_vga_state - set VGA decode state on device and parents if requested |
2620 | * @dev: the PCI device | 2869 | * @dev: the PCI device |
@@ -2628,9 +2877,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, | |||
2628 | struct pci_bus *bus; | 2877 | struct pci_bus *bus; |
2629 | struct pci_dev *bridge; | 2878 | struct pci_dev *bridge; |
2630 | u16 cmd; | 2879 | u16 cmd; |
2880 | int rc; | ||
2631 | 2881 | ||
2632 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); | 2882 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); |
2633 | 2883 | ||
2884 | /* ARCH specific VGA enables */ | ||
2885 | rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge); | ||
2886 | if (rc) | ||
2887 | return rc; | ||
2888 | |||
2634 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 2889 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
2635 | if (decode == true) | 2890 | if (decode == true) |
2636 | cmd |= command_bits; | 2891 | cmd |= command_bits; |
@@ -2860,8 +3115,6 @@ EXPORT_SYMBOL(pci_set_mwi); | |||
2860 | EXPORT_SYMBOL(pci_try_set_mwi); | 3115 | EXPORT_SYMBOL(pci_try_set_mwi); |
2861 | EXPORT_SYMBOL(pci_clear_mwi); | 3116 | EXPORT_SYMBOL(pci_clear_mwi); |
2862 | EXPORT_SYMBOL_GPL(pci_intx); | 3117 | EXPORT_SYMBOL_GPL(pci_intx); |
2863 | EXPORT_SYMBOL(pci_set_dma_mask); | ||
2864 | EXPORT_SYMBOL(pci_set_consistent_dma_mask); | ||
2865 | EXPORT_SYMBOL(pci_assign_resource); | 3118 | EXPORT_SYMBOL(pci_assign_resource); |
2866 | EXPORT_SYMBOL(pci_find_parent_resource); | 3119 | EXPORT_SYMBOL(pci_find_parent_resource); |
2867 | EXPORT_SYMBOL(pci_select_bars); | 3120 | EXPORT_SYMBOL(pci_select_bars); |
@@ -2871,10 +3124,8 @@ EXPORT_SYMBOL(pci_save_state); | |||
2871 | EXPORT_SYMBOL(pci_restore_state); | 3124 | EXPORT_SYMBOL(pci_restore_state); |
2872 | EXPORT_SYMBOL(pci_pme_capable); | 3125 | EXPORT_SYMBOL(pci_pme_capable); |
2873 | EXPORT_SYMBOL(pci_pme_active); | 3126 | EXPORT_SYMBOL(pci_pme_active); |
2874 | EXPORT_SYMBOL(pci_enable_wake); | ||
2875 | EXPORT_SYMBOL(pci_wake_from_d3); | 3127 | EXPORT_SYMBOL(pci_wake_from_d3); |
2876 | EXPORT_SYMBOL(pci_target_state); | 3128 | EXPORT_SYMBOL(pci_target_state); |
2877 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 3129 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
2878 | EXPORT_SYMBOL(pci_back_from_sleep); | 3130 | EXPORT_SYMBOL(pci_back_from_sleep); |
2879 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); | 3131 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
2880 | |||
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fbd0e3adbca3..f69d6e0fda75 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -11,10 +11,24 @@ | |||
11 | extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); | 11 | extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); |
12 | extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); | 12 | extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); |
13 | extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); | 13 | extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); |
14 | #ifndef CONFIG_DMI | ||
15 | static inline void pci_create_firmware_label_files(struct pci_dev *pdev) | ||
16 | { return; } | ||
17 | static inline void pci_remove_firmware_label_files(struct pci_dev *pdev) | ||
18 | { return; } | ||
19 | #else | ||
20 | extern void pci_create_firmware_label_files(struct pci_dev *pdev); | ||
21 | extern void pci_remove_firmware_label_files(struct pci_dev *pdev); | ||
22 | #endif | ||
14 | extern void pci_cleanup_rom(struct pci_dev *dev); | 23 | extern void pci_cleanup_rom(struct pci_dev *dev); |
15 | #ifdef HAVE_PCI_MMAP | 24 | #ifdef HAVE_PCI_MMAP |
25 | enum pci_mmap_api { | ||
26 | PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ | ||
27 | PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ | ||
28 | }; | ||
16 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | 29 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, |
17 | struct vm_area_struct *vma); | 30 | struct vm_area_struct *vmai, |
31 | enum pci_mmap_api mmap_api); | ||
18 | #endif | 32 | #endif |
19 | int pci_probe_reset_function(struct pci_dev *dev); | 33 | int pci_probe_reset_function(struct pci_dev *dev); |
20 | 34 | ||
@@ -35,6 +49,10 @@ int pci_probe_reset_function(struct pci_dev *dev); | |||
35 | * | 49 | * |
36 | * @sleep_wake: enables/disables the system wake up capability of given device | 50 | * @sleep_wake: enables/disables the system wake up capability of given device |
37 | * | 51 | * |
52 | * @run_wake: enables/disables the platform to generate run-time wake-up events | ||
53 | * for given device (the device's wake-up capability has to be | ||
54 | * enabled by @sleep_wake for this feature to work) | ||
55 | * | ||
38 | * If given platform is generally capable of power managing PCI devices, all of | 56 | * If given platform is generally capable of power managing PCI devices, all of |
39 | * these callbacks are mandatory. | 57 | * these callbacks are mandatory. |
40 | */ | 58 | */ |
@@ -44,15 +62,24 @@ struct pci_platform_pm_ops { | |||
44 | pci_power_t (*choose_state)(struct pci_dev *dev); | 62 | pci_power_t (*choose_state)(struct pci_dev *dev); |
45 | bool (*can_wakeup)(struct pci_dev *dev); | 63 | bool (*can_wakeup)(struct pci_dev *dev); |
46 | int (*sleep_wake)(struct pci_dev *dev, bool enable); | 64 | int (*sleep_wake)(struct pci_dev *dev, bool enable); |
65 | int (*run_wake)(struct pci_dev *dev, bool enable); | ||
47 | }; | 66 | }; |
48 | 67 | ||
49 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); | 68 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); |
50 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); | 69 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); |
51 | extern void pci_disable_enabled_device(struct pci_dev *dev); | 70 | extern void pci_disable_enabled_device(struct pci_dev *dev); |
71 | extern int pci_finish_runtime_suspend(struct pci_dev *dev); | ||
72 | extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | ||
52 | extern void pci_pm_init(struct pci_dev *dev); | 73 | extern void pci_pm_init(struct pci_dev *dev); |
53 | extern void platform_pci_wakeup_init(struct pci_dev *dev); | 74 | extern void platform_pci_wakeup_init(struct pci_dev *dev); |
54 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); | 75 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); |
55 | 76 | ||
77 | static inline void pci_wakeup_event(struct pci_dev *dev) | ||
78 | { | ||
79 | /* Wait 100 ms before the system can be put into a sleep state. */ | ||
80 | pm_wakeup_event(&dev->dev, 100); | ||
81 | } | ||
82 | |||
56 | static inline bool pci_is_bridge(struct pci_dev *pci_dev) | 83 | static inline bool pci_is_bridge(struct pci_dev *pci_dev) |
57 | { | 84 | { |
58 | return !!(pci_dev->subordinate); | 85 | return !!(pci_dev->subordinate); |
@@ -119,12 +146,6 @@ static inline void pci_no_msi(void) { } | |||
119 | static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } | 146 | static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } |
120 | #endif | 147 | #endif |
121 | 148 | ||
122 | #ifdef CONFIG_PCIEAER | ||
123 | void pci_no_aer(void); | ||
124 | #else | ||
125 | static inline void pci_no_aer(void) { } | ||
126 | #endif | ||
127 | |||
128 | static inline int pci_no_d1d2(struct pci_dev *dev) | 149 | static inline int pci_no_d1d2(struct pci_dev *dev) |
129 | { | 150 | { |
130 | unsigned int parent_dstates = 0; | 151 | unsigned int parent_dstates = 0; |
@@ -235,7 +256,7 @@ struct pci_ats { | |||
235 | int stu; /* Smallest Translation Unit */ | 256 | int stu; /* Smallest Translation Unit */ |
236 | int qdep; /* Invalidate Queue Depth */ | 257 | int qdep; /* Invalidate Queue Depth */ |
237 | int ref_cnt; /* Physical Function reference count */ | 258 | int ref_cnt; /* Physical Function reference count */ |
238 | int is_enabled:1; /* Enable bit is set */ | 259 | unsigned int is_enabled:1; /* Enable bit is set */ |
239 | }; | 260 | }; |
240 | 261 | ||
241 | #ifdef CONFIG_PCI_IOV | 262 | #ifdef CONFIG_PCI_IOV |
@@ -243,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev); | |||
243 | extern void pci_iov_release(struct pci_dev *dev); | 264 | extern void pci_iov_release(struct pci_dev *dev); |
244 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, | 265 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, |
245 | enum pci_bar_type *type); | 266 | enum pci_bar_type *type); |
246 | extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); | 267 | extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, |
268 | int resno); | ||
247 | extern void pci_restore_iov_state(struct pci_dev *dev); | 269 | extern void pci_restore_iov_state(struct pci_dev *dev); |
248 | extern int pci_iov_bus_range(struct pci_bus *bus); | 270 | extern int pci_iov_bus_range(struct pci_bus *bus); |
249 | 271 | ||
@@ -299,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
299 | } | 321 | } |
300 | #endif /* CONFIG_PCI_IOV */ | 322 | #endif /* CONFIG_PCI_IOV */ |
301 | 323 | ||
302 | static inline int pci_resource_alignment(struct pci_dev *dev, | 324 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
303 | struct resource *res) | 325 | struct resource *res) |
304 | { | 326 | { |
305 | #ifdef CONFIG_PCI_IOV | 327 | #ifdef CONFIG_PCI_IOV |
@@ -319,6 +341,13 @@ struct pci_dev_reset_methods { | |||
319 | int (*reset)(struct pci_dev *dev, int probe); | 341 | int (*reset)(struct pci_dev *dev, int probe); |
320 | }; | 342 | }; |
321 | 343 | ||
344 | #ifdef CONFIG_PCI_QUIRKS | ||
322 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); | 345 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); |
346 | #else | ||
347 | static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) | ||
348 | { | ||
349 | return -ENOTTY; | ||
350 | } | ||
351 | #endif | ||
323 | 352 | ||
324 | #endif /* DRIVERS_PCI_H */ | 353 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 5a0c6ad53f8e..dda70981b7a6 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig | |||
@@ -31,14 +31,22 @@ source "drivers/pci/pcie/aer/Kconfig" | |||
31 | # PCI Express ASPM | 31 | # PCI Express ASPM |
32 | # | 32 | # |
33 | config PCIEASPM | 33 | config PCIEASPM |
34 | bool "PCI Express ASPM support(Experimental)" | 34 | bool "PCI Express ASPM control" if EMBEDDED |
35 | depends on PCI && EXPERIMENTAL && PCIEPORTBUS | 35 | depends on PCI && PCIEPORTBUS |
36 | default n | 36 | default y |
37 | help | 37 | help |
38 | This enables PCI Express ASPM (Active State Power Management) and | 38 | This enables OS control over PCI Express ASPM (Active State |
39 | Clock Power Management. ASPM supports state L0/L0s/L1. | 39 | Power Management) and Clock Power Management. ASPM supports |
40 | state L0/L0s/L1. | ||
40 | 41 | ||
41 | When in doubt, say N. | 42 | ASPM is initially set up the the firmware. With this option enabled, |
43 | Linux can modify this state in order to disable ASPM on known-bad | ||
44 | hardware or configurations and enable it when known-safe. | ||
45 | |||
46 | ASPM can be disabled or enabled at runtime via | ||
47 | /sys/module/pcie_aspm/parameters/policy | ||
48 | |||
49 | When in doubt, say Y. | ||
42 | config PCIEASPM_DEBUG | 50 | config PCIEASPM_DEBUG |
43 | bool "Debug PCI Express ASPM" | 51 | bool "Debug PCI Express ASPM" |
44 | depends on PCIEASPM | 52 | depends on PCIEASPM |
@@ -46,3 +54,7 @@ config PCIEASPM_DEBUG | |||
46 | help | 54 | help |
47 | This enables PCI Express ASPM debug support. It will add per-device | 55 | This enables PCI Express ASPM debug support. It will add per-device |
48 | interface to control ASPM. | 56 | interface to control ASPM. |
57 | |||
58 | config PCIE_PME | ||
59 | def_bool y | ||
60 | depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI | ||
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 11f6bb1eae24..00c62df5a9fc 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile | |||
@@ -6,8 +6,11 @@ | |||
6 | obj-$(CONFIG_PCIEASPM) += aspm.o | 6 | obj-$(CONFIG_PCIEASPM) += aspm.o |
7 | 7 | ||
8 | pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o | 8 | pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o |
9 | pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o | ||
9 | 10 | ||
10 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o | 11 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o |
11 | 12 | ||
12 | # Build PCI Express AER if needed | 13 | # Build PCI Express AER if needed |
13 | obj-$(CONFIG_PCIEAER) += aer/ | 14 | obj-$(CONFIG_PCIEAER) += aer/ |
15 | |||
16 | obj-$(CONFIG_PCIE_PME) += pme.o | ||
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 223052b73563..b3cf6223f63a 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/miscdevice.h> | 22 | #include <linux/miscdevice.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
25 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
26 | #include <linux/stddef.h> | 27 | #include <linux/stddef.h> |
@@ -167,7 +168,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where, | |||
167 | target = &err->root_status; | 168 | target = &err->root_status; |
168 | rw1cs = 1; | 169 | rw1cs = 1; |
169 | break; | 170 | break; |
170 | case PCI_ERR_ROOT_COR_SRC: | 171 | case PCI_ERR_ROOT_ERR_SRC: |
171 | target = &err->source_id; | 172 | target = &err->source_id; |
172 | break; | 173 | break; |
173 | } | 174 | } |
@@ -471,6 +472,7 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, | |||
471 | static const struct file_operations aer_inject_fops = { | 472 | static const struct file_operations aer_inject_fops = { |
472 | .write = aer_inject_write, | 473 | .write = aer_inject_write, |
473 | .owner = THIS_MODULE, | 474 | .owner = THIS_MODULE, |
475 | .llseek = noop_llseek, | ||
474 | }; | 476 | }; |
475 | 477 | ||
476 | static struct miscdevice aer_inject_device = { | 478 | static struct miscdevice aer_inject_device = { |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 21f215f4daa3..58ad7917553c 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/pci-acpi.h> | ||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
21 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
@@ -25,6 +26,7 @@ | |||
25 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
26 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
27 | #include <linux/pcieport_if.h> | 28 | #include <linux/pcieport_if.h> |
29 | #include <linux/slab.h> | ||
28 | 30 | ||
29 | #include "aerdrv.h" | 31 | #include "aerdrv.h" |
30 | #include "../../pci.h" | 32 | #include "../../pci.h" |
@@ -71,13 +73,125 @@ void pci_no_aer(void) | |||
71 | pcie_aer_disable = 1; /* has priority over 'forceload' */ | 73 | pcie_aer_disable = 1; /* has priority over 'forceload' */ |
72 | } | 74 | } |
73 | 75 | ||
76 | bool pci_aer_available(void) | ||
77 | { | ||
78 | return !pcie_aer_disable && pci_msi_enabled(); | ||
79 | } | ||
80 | |||
81 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | ||
82 | { | ||
83 | bool enable = *((bool *)data); | ||
84 | |||
85 | if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || | ||
86 | (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || | ||
87 | (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { | ||
88 | if (enable) | ||
89 | pci_enable_pcie_error_reporting(dev); | ||
90 | else | ||
91 | pci_disable_pcie_error_reporting(dev); | ||
92 | } | ||
93 | |||
94 | if (enable) | ||
95 | pcie_set_ecrc_checking(dev); | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports. | ||
102 | * @dev: pointer to root port's pci_dev data structure | ||
103 | * @enable: true = enable error reporting, false = disable error reporting. | ||
104 | */ | ||
105 | static void set_downstream_devices_error_reporting(struct pci_dev *dev, | ||
106 | bool enable) | ||
107 | { | ||
108 | set_device_error_reporting(dev, &enable); | ||
109 | |||
110 | if (!dev->subordinate) | ||
111 | return; | ||
112 | pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * aer_enable_rootport - enable Root Port's interrupts when receiving messages | ||
117 | * @rpc: pointer to a Root Port data structure | ||
118 | * | ||
119 | * Invoked when PCIe bus loads AER service driver. | ||
120 | */ | ||
121 | static void aer_enable_rootport(struct aer_rpc *rpc) | ||
122 | { | ||
123 | struct pci_dev *pdev = rpc->rpd->port; | ||
124 | int pos, aer_pos; | ||
125 | u16 reg16; | ||
126 | u32 reg32; | ||
127 | |||
128 | pos = pci_pcie_cap(pdev); | ||
129 | /* Clear PCIe Capability's Device Status */ | ||
130 | pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); | ||
131 | pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); | ||
132 | |||
133 | /* Disable system error generation in response to error messages */ | ||
134 | pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, ®16); | ||
135 | reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); | ||
136 | pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); | ||
137 | |||
138 | aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); | ||
139 | /* Clear error status */ | ||
140 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); | ||
141 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); | ||
142 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, ®32); | ||
143 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32); | ||
144 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, ®32); | ||
145 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32); | ||
146 | |||
147 | /* | ||
148 | * Enable error reporting for the root port device and downstream port | ||
149 | * devices. | ||
150 | */ | ||
151 | set_downstream_devices_error_reporting(pdev, true); | ||
152 | |||
153 | /* Enable Root Port's interrupt in response to error messages */ | ||
154 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, ®32); | ||
155 | reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; | ||
156 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32); | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * aer_disable_rootport - disable Root Port's interrupts when receiving messages | ||
161 | * @rpc: pointer to a Root Port data structure | ||
162 | * | ||
163 | * Invoked when PCIe bus unloads AER service driver. | ||
164 | */ | ||
165 | static void aer_disable_rootport(struct aer_rpc *rpc) | ||
166 | { | ||
167 | struct pci_dev *pdev = rpc->rpd->port; | ||
168 | u32 reg32; | ||
169 | int pos; | ||
170 | |||
171 | /* | ||
172 | * Disable error reporting for the root port device and downstream port | ||
173 | * devices. | ||
174 | */ | ||
175 | set_downstream_devices_error_reporting(pdev, false); | ||
176 | |||
177 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); | ||
178 | /* Disable Root's interrupt in response to error messages */ | ||
179 | pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, ®32); | ||
180 | reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; | ||
181 | pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32); | ||
182 | |||
183 | /* Clear Root's error status reg */ | ||
184 | pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, ®32); | ||
185 | pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32); | ||
186 | } | ||
187 | |||
74 | /** | 188 | /** |
75 | * aer_irq - Root Port's ISR | 189 | * aer_irq - Root Port's ISR |
76 | * @irq: IRQ assigned to Root Port | 190 | * @irq: IRQ assigned to Root Port |
77 | * @context: pointer to Root Port data structure | 191 | * @context: pointer to Root Port data structure |
78 | * | 192 | * |
79 | * Invoked when Root Port detects AER messages. | 193 | * Invoked when Root Port detects AER messages. |
80 | **/ | 194 | */ |
81 | irqreturn_t aer_irq(int irq, void *context) | 195 | irqreturn_t aer_irq(int irq, void *context) |
82 | { | 196 | { |
83 | unsigned int status, id; | 197 | unsigned int status, id; |
@@ -96,13 +210,13 @@ irqreturn_t aer_irq(int irq, void *context) | |||
96 | 210 | ||
97 | /* Read error status */ | 211 | /* Read error status */ |
98 | pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status); | 212 | pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status); |
99 | if (!(status & ROOT_ERR_STATUS_MASKS)) { | 213 | if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) { |
100 | spin_unlock_irqrestore(&rpc->e_lock, flags); | 214 | spin_unlock_irqrestore(&rpc->e_lock, flags); |
101 | return IRQ_NONE; | 215 | return IRQ_NONE; |
102 | } | 216 | } |
103 | 217 | ||
104 | /* Read error source and clear error status */ | 218 | /* Read error source and clear error status */ |
105 | pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_COR_SRC, &id); | 219 | pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id); |
106 | pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status); | 220 | pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status); |
107 | 221 | ||
108 | /* Store error source for later DPC handler */ | 222 | /* Store error source for later DPC handler */ |
@@ -134,7 +248,7 @@ EXPORT_SYMBOL_GPL(aer_irq); | |||
134 | * @dev: pointer to the pcie_dev data structure | 248 | * @dev: pointer to the pcie_dev data structure |
135 | * | 249 | * |
136 | * Invoked when Root Port's AER service is loaded. | 250 | * Invoked when Root Port's AER service is loaded. |
137 | **/ | 251 | */ |
138 | static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) | 252 | static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) |
139 | { | 253 | { |
140 | struct aer_rpc *rpc; | 254 | struct aer_rpc *rpc; |
@@ -143,15 +257,11 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) | |||
143 | if (!rpc) | 257 | if (!rpc) |
144 | return NULL; | 258 | return NULL; |
145 | 259 | ||
146 | /* | 260 | /* Initialize Root lock access, e_lock, to Root Error Status Reg */ |
147 | * Initialize Root lock access, e_lock, to Root Error Status Reg, | ||
148 | * Root Error ID Reg, and Root error producer/consumer index. | ||
149 | */ | ||
150 | spin_lock_init(&rpc->e_lock); | 261 | spin_lock_init(&rpc->e_lock); |
151 | 262 | ||
152 | rpc->rpd = dev; | 263 | rpc->rpd = dev; |
153 | INIT_WORK(&rpc->dpc_handler, aer_isr); | 264 | INIT_WORK(&rpc->dpc_handler, aer_isr); |
154 | rpc->prod_idx = rpc->cons_idx = 0; | ||
155 | mutex_init(&rpc->rpc_mutex); | 265 | mutex_init(&rpc->rpc_mutex); |
156 | init_waitqueue_head(&rpc->wait_release); | 266 | init_waitqueue_head(&rpc->wait_release); |
157 | 267 | ||
@@ -166,7 +276,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) | |||
166 | * @dev: pointer to the pcie_dev data structure | 276 | * @dev: pointer to the pcie_dev data structure |
167 | * | 277 | * |
168 | * Invoked when PCI Express bus unloads or AER probe fails. | 278 | * Invoked when PCI Express bus unloads or AER probe fails. |
169 | **/ | 279 | */ |
170 | static void aer_remove(struct pcie_device *dev) | 280 | static void aer_remove(struct pcie_device *dev) |
171 | { | 281 | { |
172 | struct aer_rpc *rpc = get_service_data(dev); | 282 | struct aer_rpc *rpc = get_service_data(dev); |
@@ -178,7 +288,8 @@ static void aer_remove(struct pcie_device *dev) | |||
178 | 288 | ||
179 | wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); | 289 | wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); |
180 | 290 | ||
181 | aer_delete_rootport(rpc); | 291 | aer_disable_rootport(rpc); |
292 | kfree(rpc); | ||
182 | set_service_data(dev, NULL); | 293 | set_service_data(dev, NULL); |
183 | } | 294 | } |
184 | } | 295 | } |
@@ -189,7 +300,7 @@ static void aer_remove(struct pcie_device *dev) | |||
189 | * @id: pointer to the service id data structure | 300 | * @id: pointer to the service id data structure |
190 | * | 301 | * |
191 | * Invoked when PCI Express bus loads AER service driver. | 302 | * Invoked when PCI Express bus loads AER service driver. |
192 | **/ | 303 | */ |
193 | static int __devinit aer_probe(struct pcie_device *dev) | 304 | static int __devinit aer_probe(struct pcie_device *dev) |
194 | { | 305 | { |
195 | int status; | 306 | int status; |
@@ -229,41 +340,30 @@ static int __devinit aer_probe(struct pcie_device *dev) | |||
229 | * @dev: pointer to Root Port's pci_dev data structure | 340 | * @dev: pointer to Root Port's pci_dev data structure |
230 | * | 341 | * |
231 | * Invoked by Port Bus driver when performing link reset at Root Port. | 342 | * Invoked by Port Bus driver when performing link reset at Root Port. |
232 | **/ | 343 | */ |
233 | static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | 344 | static pci_ers_result_t aer_root_reset(struct pci_dev *dev) |
234 | { | 345 | { |
235 | u16 p2p_ctrl; | 346 | u32 reg32; |
236 | u32 status; | ||
237 | int pos; | 347 | int pos; |
238 | 348 | ||
239 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 349 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
240 | 350 | ||
241 | /* Disable Root's interrupt in response to error messages */ | 351 | /* Disable Root's interrupt in response to error messages */ |
242 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); | 352 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); |
353 | reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; | ||
354 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); | ||
243 | 355 | ||
244 | /* Assert Secondary Bus Reset */ | 356 | aer_do_secondary_bus_reset(dev); |
245 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); | ||
246 | p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET; | ||
247 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | ||
248 | |||
249 | /* De-assert Secondary Bus Reset */ | ||
250 | p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET; | ||
251 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | ||
252 | |||
253 | /* | ||
254 | * System software must wait for at least 100ms from the end | ||
255 | * of a reset of one or more device before it is permitted | ||
256 | * to issue Configuration Requests to those devices. | ||
257 | */ | ||
258 | msleep(200); | ||
259 | dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); | 357 | dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); |
260 | 358 | ||
359 | /* Clear Root Error Status */ | ||
360 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); | ||
361 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); | ||
362 | |||
261 | /* Enable Root Port's interrupt in response to error messages */ | 363 | /* Enable Root Port's interrupt in response to error messages */ |
262 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); | 364 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); |
263 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status); | 365 | reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; |
264 | pci_write_config_dword(dev, | 366 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); |
265 | pos + PCI_ERR_ROOT_COMMAND, | ||
266 | ROOT_PORT_INTR_ON_MESG_MASK); | ||
267 | 367 | ||
268 | return PCI_ERS_RESULT_RECOVERED; | 368 | return PCI_ERS_RESULT_RECOVERED; |
269 | } | 369 | } |
@@ -274,7 +374,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | |||
274 | * @error: error severity being notified by port bus | 374 | * @error: error severity being notified by port bus |
275 | * | 375 | * |
276 | * Invoked by Port Bus driver during error recovery. | 376 | * Invoked by Port Bus driver during error recovery. |
277 | **/ | 377 | */ |
278 | static pci_ers_result_t aer_error_detected(struct pci_dev *dev, | 378 | static pci_ers_result_t aer_error_detected(struct pci_dev *dev, |
279 | enum pci_channel_state error) | 379 | enum pci_channel_state error) |
280 | { | 380 | { |
@@ -287,7 +387,7 @@ static pci_ers_result_t aer_error_detected(struct pci_dev *dev, | |||
287 | * @dev: pointer to Root Port's pci_dev data structure | 387 | * @dev: pointer to Root Port's pci_dev data structure |
288 | * | 388 | * |
289 | * Invoked by Port Bus driver during nonfatal recovery. | 389 | * Invoked by Port Bus driver during nonfatal recovery. |
290 | **/ | 390 | */ |
291 | static void aer_error_resume(struct pci_dev *dev) | 391 | static void aer_error_resume(struct pci_dev *dev) |
292 | { | 392 | { |
293 | int pos; | 393 | int pos; |
@@ -314,12 +414,10 @@ static void aer_error_resume(struct pci_dev *dev) | |||
314 | * aer_service_init - register AER root service driver | 414 | * aer_service_init - register AER root service driver |
315 | * | 415 | * |
316 | * Invoked when AER root service driver is loaded. | 416 | * Invoked when AER root service driver is loaded. |
317 | **/ | 417 | */ |
318 | static int __init aer_service_init(void) | 418 | static int __init aer_service_init(void) |
319 | { | 419 | { |
320 | if (pcie_aer_disable) | 420 | if (!pci_aer_available() || aer_acpi_firmware_first()) |
321 | return -ENXIO; | ||
322 | if (!pci_msi_enabled()) | ||
323 | return -ENXIO; | 421 | return -ENXIO; |
324 | return pcie_port_service_register(&aerdriver); | 422 | return pcie_port_service_register(&aerdriver); |
325 | } | 423 | } |
@@ -328,7 +426,7 @@ static int __init aer_service_init(void) | |||
328 | * aer_service_exit - unregister AER root service driver | 426 | * aer_service_exit - unregister AER root service driver |
329 | * | 427 | * |
330 | * Invoked when AER root service driver is unloaded. | 428 | * Invoked when AER root service driver is unloaded. |
331 | **/ | 429 | */ |
332 | static void __exit aer_service_exit(void) | 430 | static void __exit aer_service_exit(void) |
333 | { | 431 | { |
334 | pcie_port_service_unregister(&aerdriver); | 432 | pcie_port_service_unregister(&aerdriver); |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index bd833ea3ba49..80c11d131499 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
@@ -17,9 +17,6 @@ | |||
17 | #define AER_FATAL 1 | 17 | #define AER_FATAL 1 |
18 | #define AER_CORRECTABLE 2 | 18 | #define AER_CORRECTABLE 2 |
19 | 19 | ||
20 | /* Root Error Status Register Bits */ | ||
21 | #define ROOT_ERR_STATUS_MASKS 0x0f | ||
22 | |||
23 | #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ | 20 | #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ |
24 | PCI_EXP_RTCTL_SENFEE| \ | 21 | PCI_EXP_RTCTL_SENFEE| \ |
25 | PCI_EXP_RTCTL_SEFEE) | 22 | PCI_EXP_RTCTL_SEFEE) |
@@ -117,8 +114,7 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig, | |||
117 | } | 114 | } |
118 | 115 | ||
119 | extern struct bus_type pcie_port_bus_type; | 116 | extern struct bus_type pcie_port_bus_type; |
120 | extern void aer_enable_rootport(struct aer_rpc *rpc); | 117 | extern void aer_do_secondary_bus_reset(struct pci_dev *dev); |
121 | extern void aer_delete_rootport(struct aer_rpc *rpc); | ||
122 | extern int aer_init(struct pcie_device *dev); | 118 | extern int aer_init(struct pcie_device *dev); |
123 | extern void aer_isr(struct work_struct *work); | 119 | extern void aer_isr(struct work_struct *work); |
124 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); | 120 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); |
@@ -134,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) | |||
134 | } | 130 | } |
135 | #endif | 131 | #endif |
136 | 132 | ||
133 | #ifdef CONFIG_ACPI_APEI | ||
134 | extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); | ||
135 | #else | ||
136 | static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) | ||
137 | { | ||
138 | if (pci_dev->__aer_firmware_first_valid) | ||
139 | return pci_dev->__aer_firmware_first; | ||
140 | return 0; | ||
141 | } | ||
142 | #endif | ||
143 | |||
144 | static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, | ||
145 | int enable) | ||
146 | { | ||
147 | pci_dev->__aer_firmware_first = !!enable; | ||
148 | pci_dev->__aer_firmware_first_valid = 1; | ||
149 | } | ||
137 | #endif /* _AERDRV_H_ */ | 150 | #endif /* _AERDRV_H_ */ |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 04814087658d..275bf158ffa7 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
@@ -16,40 +16,115 @@ | |||
16 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
17 | #include <linux/pci-acpi.h> | 17 | #include <linux/pci-acpi.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <acpi/apei.h> | ||
19 | #include "aerdrv.h" | 20 | #include "aerdrv.h" |
20 | 21 | ||
21 | /** | 22 | #ifdef CONFIG_ACPI_APEI |
22 | * aer_osc_setup - run ACPI _OSC method | 23 | static inline int hest_match_pci(struct acpi_hest_aer_common *p, |
23 | * @pciedev: pcie_device which AER is being enabled on | 24 | struct pci_dev *pci) |
24 | * | ||
25 | * @return: Zero on success. Nonzero otherwise. | ||
26 | * | ||
27 | * Invoked when PCIe bus loads AER service driver. To avoid conflict with | ||
28 | * BIOS AER support requires BIOS to yield AER control to OS native driver. | ||
29 | **/ | ||
30 | int aer_osc_setup(struct pcie_device *pciedev) | ||
31 | { | 25 | { |
32 | acpi_status status = AE_NOT_FOUND; | 26 | return (0 == pci_domain_nr(pci->bus) && |
33 | struct pci_dev *pdev = pciedev->port; | 27 | p->bus == pci->bus->number && |
34 | acpi_handle handle = NULL; | 28 | p->device == PCI_SLOT(pci->devfn) && |
35 | 29 | p->function == PCI_FUNC(pci->devfn)); | |
36 | if (acpi_pci_disabled) | 30 | } |
37 | return -1; | 31 | |
38 | 32 | struct aer_hest_parse_info { | |
39 | handle = acpi_find_root_bridge_handle(pdev); | 33 | struct pci_dev *pci_dev; |
40 | if (handle) { | 34 | int firmware_first; |
41 | status = acpi_pci_osc_control_set(handle, | 35 | }; |
42 | OSC_PCI_EXPRESS_AER_CONTROL | | 36 | |
43 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | 37 | static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) |
44 | } | 38 | { |
39 | struct aer_hest_parse_info *info = data; | ||
40 | struct acpi_hest_aer_common *p; | ||
41 | u8 pcie_type = 0; | ||
42 | u8 bridge = 0; | ||
43 | int ff = 0; | ||
45 | 44 | ||
46 | if (ACPI_FAILURE(status)) { | 45 | switch (hest_hdr->type) { |
47 | dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't " | 46 | case ACPI_HEST_TYPE_AER_ROOT_PORT: |
48 | "init device: %s\n", | 47 | pcie_type = PCI_EXP_TYPE_ROOT_PORT; |
49 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | 48 | break; |
50 | "no _OSC support" : "_OSC failed"); | 49 | case ACPI_HEST_TYPE_AER_ENDPOINT: |
51 | return -1; | 50 | pcie_type = PCI_EXP_TYPE_ENDPOINT; |
51 | break; | ||
52 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
53 | if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) | ||
54 | bridge = 1; | ||
55 | break; | ||
56 | default: | ||
57 | return 0; | ||
52 | } | 58 | } |
53 | 59 | ||
60 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | ||
61 | if (p->flags & ACPI_HEST_GLOBAL) { | ||
62 | if ((info->pci_dev->is_pcie && | ||
63 | info->pci_dev->pcie_type == pcie_type) || bridge) | ||
64 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
65 | } else | ||
66 | if (hest_match_pci(p, info->pci_dev)) | ||
67 | ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
68 | info->firmware_first = ff; | ||
69 | |||
54 | return 0; | 70 | return 0; |
55 | } | 71 | } |
72 | |||
73 | static void aer_set_firmware_first(struct pci_dev *pci_dev) | ||
74 | { | ||
75 | int rc; | ||
76 | struct aer_hest_parse_info info = { | ||
77 | .pci_dev = pci_dev, | ||
78 | .firmware_first = 0, | ||
79 | }; | ||
80 | |||
81 | rc = apei_hest_parse(aer_hest_parse, &info); | ||
82 | |||
83 | if (rc) | ||
84 | pci_dev->__aer_firmware_first = 0; | ||
85 | else | ||
86 | pci_dev->__aer_firmware_first = info.firmware_first; | ||
87 | pci_dev->__aer_firmware_first_valid = 1; | ||
88 | } | ||
89 | |||
90 | int pcie_aer_get_firmware_first(struct pci_dev *dev) | ||
91 | { | ||
92 | if (!dev->__aer_firmware_first_valid) | ||
93 | aer_set_firmware_first(dev); | ||
94 | return dev->__aer_firmware_first; | ||
95 | } | ||
96 | |||
97 | static bool aer_firmware_first; | ||
98 | |||
99 | static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data) | ||
100 | { | ||
101 | struct acpi_hest_aer_common *p; | ||
102 | |||
103 | if (aer_firmware_first) | ||
104 | return 0; | ||
105 | |||
106 | switch (hest_hdr->type) { | ||
107 | case ACPI_HEST_TYPE_AER_ROOT_PORT: | ||
108 | case ACPI_HEST_TYPE_AER_ENDPOINT: | ||
109 | case ACPI_HEST_TYPE_AER_BRIDGE: | ||
110 | p = (struct acpi_hest_aer_common *)(hest_hdr + 1); | ||
111 | aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); | ||
112 | default: | ||
113 | return 0; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * aer_acpi_firmware_first - Check if APEI should control AER. | ||
119 | */ | ||
120 | bool aer_acpi_firmware_first(void) | ||
121 | { | ||
122 | static bool parsed = false; | ||
123 | |||
124 | if (!parsed) { | ||
125 | apei_hest_parse(aer_hest_parse_aff, NULL); | ||
126 | parsed = true; | ||
127 | } | ||
128 | return aer_firmware_first; | ||
129 | } | ||
130 | #endif | ||
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index c843a799814d..43421fbe080a 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/suspend.h> | 24 | #include <linux/suspend.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/slab.h> | ||
26 | #include "aerdrv.h" | 27 | #include "aerdrv.h" |
27 | 28 | ||
28 | static int forceload; | 29 | static int forceload; |
@@ -35,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) | |||
35 | u16 reg16 = 0; | 36 | u16 reg16 = 0; |
36 | int pos; | 37 | int pos; |
37 | 38 | ||
38 | if (dev->aer_firmware_first) | 39 | if (pcie_aer_get_firmware_first(dev)) |
39 | return -EIO; | 40 | return -EIO; |
40 | 41 | ||
41 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 42 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
@@ -46,13 +47,12 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) | |||
46 | if (!pos) | 47 | if (!pos) |
47 | return -EIO; | 48 | return -EIO; |
48 | 49 | ||
49 | pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, ®16); | 50 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); |
50 | reg16 = reg16 | | 51 | reg16 |= (PCI_EXP_DEVCTL_CERE | |
51 | PCI_EXP_DEVCTL_CERE | | ||
52 | PCI_EXP_DEVCTL_NFERE | | 52 | PCI_EXP_DEVCTL_NFERE | |
53 | PCI_EXP_DEVCTL_FERE | | 53 | PCI_EXP_DEVCTL_FERE | |
54 | PCI_EXP_DEVCTL_URRE; | 54 | PCI_EXP_DEVCTL_URRE); |
55 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16); | 55 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); |
56 | 56 | ||
57 | return 0; | 57 | return 0; |
58 | } | 58 | } |
@@ -63,19 +63,19 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) | |||
63 | u16 reg16 = 0; | 63 | u16 reg16 = 0; |
64 | int pos; | 64 | int pos; |
65 | 65 | ||
66 | if (dev->aer_firmware_first) | 66 | if (pcie_aer_get_firmware_first(dev)) |
67 | return -EIO; | 67 | return -EIO; |
68 | 68 | ||
69 | pos = pci_pcie_cap(dev); | 69 | pos = pci_pcie_cap(dev); |
70 | if (!pos) | 70 | if (!pos) |
71 | return -EIO; | 71 | return -EIO; |
72 | 72 | ||
73 | pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, ®16); | 73 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); |
74 | reg16 = reg16 & ~(PCI_EXP_DEVCTL_CERE | | 74 | reg16 &= ~(PCI_EXP_DEVCTL_CERE | |
75 | PCI_EXP_DEVCTL_NFERE | | 75 | PCI_EXP_DEVCTL_NFERE | |
76 | PCI_EXP_DEVCTL_FERE | | 76 | PCI_EXP_DEVCTL_FERE | |
77 | PCI_EXP_DEVCTL_URRE); | 77 | PCI_EXP_DEVCTL_URRE); |
78 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16); | 78 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); |
79 | 79 | ||
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
@@ -98,99 +98,46 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | |||
98 | } | 98 | } |
99 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | 99 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); |
100 | 100 | ||
101 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | ||
102 | { | ||
103 | bool enable = *((bool *)data); | ||
104 | |||
105 | if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || | ||
106 | (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || | ||
107 | (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { | ||
108 | if (enable) | ||
109 | pci_enable_pcie_error_reporting(dev); | ||
110 | else | ||
111 | pci_disable_pcie_error_reporting(dev); | ||
112 | } | ||
113 | |||
114 | if (enable) | ||
115 | pcie_set_ecrc_checking(dev); | ||
116 | |||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | /** | 101 | /** |
121 | * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports. | 102 | * add_error_device - list device to be handled |
122 | * @dev: pointer to root port's pci_dev data structure | 103 | * @e_info: pointer to error info |
123 | * @enable: true = enable error reporting, false = disable error reporting. | 104 | * @dev: pointer to pci_dev to be added |
124 | */ | 105 | */ |
125 | static void set_downstream_devices_error_reporting(struct pci_dev *dev, | ||
126 | bool enable) | ||
127 | { | ||
128 | set_device_error_reporting(dev, &enable); | ||
129 | |||
130 | if (!dev->subordinate) | ||
131 | return; | ||
132 | pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); | ||
133 | } | ||
134 | |||
135 | static inline int compare_device_id(struct pci_dev *dev, | ||
136 | struct aer_err_info *e_info) | ||
137 | { | ||
138 | if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) { | ||
139 | /* | ||
140 | * Device ID match | ||
141 | */ | ||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) | 106 | static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) |
149 | { | 107 | { |
150 | if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { | 108 | if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { |
151 | e_info->dev[e_info->error_dev_num] = dev; | 109 | e_info->dev[e_info->error_dev_num] = dev; |
152 | e_info->error_dev_num++; | 110 | e_info->error_dev_num++; |
153 | return 1; | 111 | return 0; |
154 | } | 112 | } |
155 | 113 | return -ENOSPC; | |
156 | return 0; | ||
157 | } | 114 | } |
158 | 115 | ||
159 | |||
160 | #define PCI_BUS(x) (((x) >> 8) & 0xff) | 116 | #define PCI_BUS(x) (((x) >> 8) & 0xff) |
161 | 117 | ||
162 | static int find_device_iter(struct pci_dev *dev, void *data) | 118 | /** |
119 | * is_error_source - check whether the device is source of reported error | ||
120 | * @dev: pointer to pci_dev to be checked | ||
121 | * @e_info: pointer to reported error info | ||
122 | */ | ||
123 | static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info) | ||
163 | { | 124 | { |
164 | int pos; | 125 | int pos; |
165 | u32 status; | 126 | u32 status, mask; |
166 | u32 mask; | ||
167 | u16 reg16; | 127 | u16 reg16; |
168 | int result; | ||
169 | struct aer_err_info *e_info = (struct aer_err_info *)data; | ||
170 | 128 | ||
171 | /* | 129 | /* |
172 | * When bus id is equal to 0, it might be a bad id | 130 | * When bus id is equal to 0, it might be a bad id |
173 | * reported by root port. | 131 | * reported by root port. |
174 | */ | 132 | */ |
175 | if (!nosourceid && (PCI_BUS(e_info->id) != 0)) { | 133 | if (!nosourceid && (PCI_BUS(e_info->id) != 0)) { |
176 | result = compare_device_id(dev, e_info); | 134 | /* Device ID match? */ |
177 | if (result) | 135 | if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) |
178 | add_error_device(e_info, dev); | 136 | return true; |
179 | 137 | ||
180 | /* | 138 | /* Continue id comparing if there is no multiple error */ |
181 | * If there is no multiple error, we stop | ||
182 | * or continue based on the id comparing. | ||
183 | */ | ||
184 | if (!e_info->multi_error_valid) | 139 | if (!e_info->multi_error_valid) |
185 | return result; | 140 | return false; |
186 | |||
187 | /* | ||
188 | * If there are multiple errors and id does match, | ||
189 | * We need continue to search other devices under | ||
190 | * the root port. Return 0 means that. | ||
191 | */ | ||
192 | if (result) | ||
193 | return 0; | ||
194 | } | 141 | } |
195 | 142 | ||
196 | /* | 143 | /* |
@@ -199,71 +146,94 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
199 | * 2) bus id is equal to 0. Some ports might lose the bus | 146 | * 2) bus id is equal to 0. Some ports might lose the bus |
200 | * id of error source id; | 147 | * id of error source id; |
201 | * 3) There are multiple errors and prior id comparing fails; | 148 | * 3) There are multiple errors and prior id comparing fails; |
202 | * We check AER status registers to find the initial reporter. | 149 | * We check AER status registers to find possible reporter. |
203 | */ | 150 | */ |
204 | if (atomic_read(&dev->enable_cnt) == 0) | 151 | if (atomic_read(&dev->enable_cnt) == 0) |
205 | return 0; | 152 | return false; |
206 | pos = pci_pcie_cap(dev); | 153 | pos = pci_pcie_cap(dev); |
207 | if (!pos) | 154 | if (!pos) |
208 | return 0; | 155 | return false; |
156 | |||
209 | /* Check if AER is enabled */ | 157 | /* Check if AER is enabled */ |
210 | pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, ®16); | 158 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); |
211 | if (!(reg16 & ( | 159 | if (!(reg16 & ( |
212 | PCI_EXP_DEVCTL_CERE | | 160 | PCI_EXP_DEVCTL_CERE | |
213 | PCI_EXP_DEVCTL_NFERE | | 161 | PCI_EXP_DEVCTL_NFERE | |
214 | PCI_EXP_DEVCTL_FERE | | 162 | PCI_EXP_DEVCTL_FERE | |
215 | PCI_EXP_DEVCTL_URRE))) | 163 | PCI_EXP_DEVCTL_URRE))) |
216 | return 0; | 164 | return false; |
217 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 165 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
218 | if (!pos) | 166 | if (!pos) |
219 | return 0; | 167 | return false; |
220 | 168 | ||
221 | status = 0; | 169 | /* Check if error is recorded */ |
222 | mask = 0; | ||
223 | if (e_info->severity == AER_CORRECTABLE) { | 170 | if (e_info->severity == AER_CORRECTABLE) { |
224 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); | 171 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); |
225 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask); | 172 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask); |
226 | if (status & ~mask) { | ||
227 | add_error_device(e_info, dev); | ||
228 | goto added; | ||
229 | } | ||
230 | } else { | 173 | } else { |
231 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); | 174 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); |
232 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); | 175 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); |
233 | if (status & ~mask) { | ||
234 | add_error_device(e_info, dev); | ||
235 | goto added; | ||
236 | } | ||
237 | } | 176 | } |
177 | if (status & ~mask) | ||
178 | return true; | ||
238 | 179 | ||
239 | return 0; | 180 | return false; |
181 | } | ||
240 | 182 | ||
241 | added: | 183 | static int find_device_iter(struct pci_dev *dev, void *data) |
242 | if (e_info->multi_error_valid) | 184 | { |
243 | return 0; | 185 | struct aer_err_info *e_info = (struct aer_err_info *)data; |
244 | else | 186 | |
245 | return 1; | 187 | if (is_error_source(dev, e_info)) { |
188 | /* List this device */ | ||
189 | if (add_error_device(e_info, dev)) { | ||
190 | /* We cannot handle more... Stop iteration */ | ||
191 | /* TODO: Should print error message here? */ | ||
192 | return 1; | ||
193 | } | ||
194 | |||
195 | /* If there is only a single error, stop iteration */ | ||
196 | if (!e_info->multi_error_valid) | ||
197 | return 1; | ||
198 | } | ||
199 | return 0; | ||
246 | } | 200 | } |
247 | 201 | ||
248 | /** | 202 | /** |
249 | * find_source_device - search through device hierarchy for source device | 203 | * find_source_device - search through device hierarchy for source device |
250 | * @parent: pointer to Root Port pci_dev data structure | 204 | * @parent: pointer to Root Port pci_dev data structure |
251 | * @err_info: including detailed error information such like id | 205 | * @e_info: including detailed error information such like id |
252 | * | 206 | * |
253 | * Invoked when error is detected at the Root Port. | 207 | * Return true if found. |
208 | * | ||
209 | * Invoked by DPC when error is detected at the Root Port. | ||
210 | * Caller of this function must set id, severity, and multi_error_valid of | ||
211 | * struct aer_err_info pointed by @e_info properly. This function must fill | ||
212 | * e_info->error_dev_num and e_info->dev[], based on the given information. | ||
254 | */ | 213 | */ |
255 | static void find_source_device(struct pci_dev *parent, | 214 | static bool find_source_device(struct pci_dev *parent, |
256 | struct aer_err_info *e_info) | 215 | struct aer_err_info *e_info) |
257 | { | 216 | { |
258 | struct pci_dev *dev = parent; | 217 | struct pci_dev *dev = parent; |
259 | int result; | 218 | int result; |
260 | 219 | ||
220 | /* Must reset in this function */ | ||
221 | e_info->error_dev_num = 0; | ||
222 | |||
261 | /* Is Root Port an agent that sends error message? */ | 223 | /* Is Root Port an agent that sends error message? */ |
262 | result = find_device_iter(dev, e_info); | 224 | result = find_device_iter(dev, e_info); |
263 | if (result) | 225 | if (result) |
264 | return; | 226 | return true; |
265 | 227 | ||
266 | pci_walk_bus(parent->subordinate, find_device_iter, e_info); | 228 | pci_walk_bus(parent->subordinate, find_device_iter, e_info); |
229 | |||
230 | if (!e_info->error_dev_num) { | ||
231 | dev_printk(KERN_DEBUG, &parent->dev, | ||
232 | "can't find device of ID%04x\n", | ||
233 | e_info->id); | ||
234 | return false; | ||
235 | } | ||
236 | return true; | ||
267 | } | 237 | } |
268 | 238 | ||
269 | static int report_error_detected(struct pci_dev *dev, void *data) | 239 | static int report_error_detected(struct pci_dev *dev, void *data) |
@@ -402,43 +372,77 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, | |||
402 | return result_data.result; | 372 | return result_data.result; |
403 | } | 373 | } |
404 | 374 | ||
405 | struct find_aer_service_data { | 375 | /** |
406 | struct pcie_port_service_driver *aer_driver; | 376 | * aer_do_secondary_bus_reset - perform secondary bus reset |
407 | int is_downstream; | 377 | * @dev: pointer to bridge's pci_dev data structure |
408 | }; | 378 | * |
409 | 379 | * Invoked when performing link reset at Root Port or Downstream Port. | |
410 | static int find_aer_service_iter(struct device *device, void *data) | 380 | */ |
381 | void aer_do_secondary_bus_reset(struct pci_dev *dev) | ||
411 | { | 382 | { |
412 | struct device_driver *driver; | 383 | u16 p2p_ctrl; |
413 | struct pcie_port_service_driver *service_driver; | 384 | |
414 | struct find_aer_service_data *result; | 385 | /* Assert Secondary Bus Reset */ |
386 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); | ||
387 | p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
388 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | ||
389 | |||
390 | /* | ||
391 | * we should send hot reset message for 2ms to allow it time to | ||
392 | * propagate to all downstream ports | ||
393 | */ | ||
394 | msleep(2); | ||
415 | 395 | ||
416 | result = (struct find_aer_service_data *) data; | 396 | /* De-assert Secondary Bus Reset */ |
397 | p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
398 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | ||
417 | 399 | ||
418 | if (device->bus == &pcie_port_bus_type) { | 400 | /* |
419 | struct pcie_device *pcie = to_pcie_device(device); | 401 | * System software must wait for at least 100ms from the end |
402 | * of a reset of one or more device before it is permitted | ||
403 | * to issue Configuration Requests to those devices. | ||
404 | */ | ||
405 | msleep(200); | ||
406 | } | ||
407 | |||
408 | /** | ||
409 | * default_downstream_reset_link - default reset function for Downstream Port | ||
410 | * @dev: pointer to downstream port's pci_dev data structure | ||
411 | * | ||
412 | * Invoked when performing link reset at Downstream Port w/ no aer driver. | ||
413 | */ | ||
414 | static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev) | ||
415 | { | ||
416 | aer_do_secondary_bus_reset(dev); | ||
417 | dev_printk(KERN_DEBUG, &dev->dev, | ||
418 | "Downstream Port link has been reset\n"); | ||
419 | return PCI_ERS_RESULT_RECOVERED; | ||
420 | } | ||
420 | 421 | ||
421 | if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) | 422 | static int find_aer_service_iter(struct device *device, void *data) |
422 | result->is_downstream = 1; | 423 | { |
424 | struct pcie_port_service_driver *service_driver, **drv; | ||
423 | 425 | ||
424 | driver = device->driver; | 426 | drv = (struct pcie_port_service_driver **) data; |
425 | if (driver) { | 427 | |
426 | service_driver = to_service_driver(driver); | 428 | if (device->bus == &pcie_port_bus_type && device->driver) { |
427 | if (service_driver->service == PCIE_PORT_SERVICE_AER) { | 429 | service_driver = to_service_driver(device->driver); |
428 | result->aer_driver = service_driver; | 430 | if (service_driver->service == PCIE_PORT_SERVICE_AER) { |
429 | return 1; | 431 | *drv = service_driver; |
430 | } | 432 | return 1; |
431 | } | 433 | } |
432 | } | 434 | } |
433 | 435 | ||
434 | return 0; | 436 | return 0; |
435 | } | 437 | } |
436 | 438 | ||
437 | static void find_aer_service(struct pci_dev *dev, | 439 | static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev) |
438 | struct find_aer_service_data *data) | ||
439 | { | 440 | { |
440 | int retval; | 441 | struct pcie_port_service_driver *drv = NULL; |
441 | retval = device_for_each_child(&dev->dev, data, find_aer_service_iter); | 442 | |
443 | device_for_each_child(&dev->dev, &drv, find_aer_service_iter); | ||
444 | |||
445 | return drv; | ||
442 | } | 446 | } |
443 | 447 | ||
444 | static pci_ers_result_t reset_link(struct pcie_device *aerdev, | 448 | static pci_ers_result_t reset_link(struct pcie_device *aerdev, |
@@ -446,38 +450,34 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev, | |||
446 | { | 450 | { |
447 | struct pci_dev *udev; | 451 | struct pci_dev *udev; |
448 | pci_ers_result_t status; | 452 | pci_ers_result_t status; |
449 | struct find_aer_service_data data; | 453 | struct pcie_port_service_driver *driver; |
450 | 454 | ||
451 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) | 455 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { |
456 | /* Reset this port for all subordinates */ | ||
452 | udev = dev; | 457 | udev = dev; |
453 | else | 458 | } else { |
459 | /* Reset the upstream component (likely downstream port) */ | ||
454 | udev = dev->bus->self; | 460 | udev = dev->bus->self; |
461 | } | ||
455 | 462 | ||
456 | data.is_downstream = 0; | 463 | /* Use the aer driver of the component firstly */ |
457 | data.aer_driver = NULL; | 464 | driver = find_aer_service(udev); |
458 | find_aer_service(udev, &data); | ||
459 | 465 | ||
460 | /* | 466 | if (driver && driver->reset_link) { |
461 | * Use the aer driver of the error agent firstly. | 467 | status = driver->reset_link(udev); |
462 | * If it hasn't the aer driver, use the root port's | 468 | } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { |
463 | */ | 469 | status = default_downstream_reset_link(udev); |
464 | if (!data.aer_driver || !data.aer_driver->reset_link) { | 470 | } else { |
465 | if (data.is_downstream && | 471 | dev_printk(KERN_DEBUG, &dev->dev, |
466 | aerdev->device.driver && | 472 | "no link-reset support at upstream device %s\n", |
467 | to_service_driver(aerdev->device.driver)->reset_link) { | 473 | pci_name(udev)); |
468 | data.aer_driver = | 474 | return PCI_ERS_RESULT_DISCONNECT; |
469 | to_service_driver(aerdev->device.driver); | ||
470 | } else { | ||
471 | dev_printk(KERN_DEBUG, &dev->dev, "no link-reset " | ||
472 | "support\n"); | ||
473 | return PCI_ERS_RESULT_DISCONNECT; | ||
474 | } | ||
475 | } | 475 | } |
476 | 476 | ||
477 | status = data.aer_driver->reset_link(udev); | ||
478 | if (status != PCI_ERS_RESULT_RECOVERED) { | 477 | if (status != PCI_ERS_RESULT_RECOVERED) { |
479 | dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream " | 478 | dev_printk(KERN_DEBUG, &dev->dev, |
480 | "device %s failed\n", pci_name(udev)); | 479 | "link reset at upstream device %s failed\n", |
480 | pci_name(udev)); | ||
481 | return PCI_ERS_RESULT_DISCONNECT; | 481 | return PCI_ERS_RESULT_DISCONNECT; |
482 | } | 482 | } |
483 | 483 | ||
@@ -494,8 +494,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev, | |||
494 | * error detected message to all downstream drivers within a hierarchy in | 494 | * error detected message to all downstream drivers within a hierarchy in |
495 | * question and return the returned code. | 495 | * question and return the returned code. |
496 | */ | 496 | */ |
497 | static pci_ers_result_t do_recovery(struct pcie_device *aerdev, | 497 | static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev, |
498 | struct pci_dev *dev, | ||
499 | int severity) | 498 | int severity) |
500 | { | 499 | { |
501 | pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED; | 500 | pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED; |
@@ -513,10 +512,8 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev, | |||
513 | 512 | ||
514 | if (severity == AER_FATAL) { | 513 | if (severity == AER_FATAL) { |
515 | result = reset_link(aerdev, dev); | 514 | result = reset_link(aerdev, dev); |
516 | if (result != PCI_ERS_RESULT_RECOVERED) { | 515 | if (result != PCI_ERS_RESULT_RECOVERED) |
517 | /* TODO: Should panic here? */ | 516 | goto failed; |
518 | return result; | ||
519 | } | ||
520 | } | 517 | } |
521 | 518 | ||
522 | if (status == PCI_ERS_RESULT_CAN_RECOVER) | 519 | if (status == PCI_ERS_RESULT_CAN_RECOVER) |
@@ -537,13 +534,22 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev, | |||
537 | report_slot_reset); | 534 | report_slot_reset); |
538 | } | 535 | } |
539 | 536 | ||
540 | if (status == PCI_ERS_RESULT_RECOVERED) | 537 | if (status != PCI_ERS_RESULT_RECOVERED) |
541 | broadcast_error_message(dev, | 538 | goto failed; |
539 | |||
540 | broadcast_error_message(dev, | ||
542 | state, | 541 | state, |
543 | "resume", | 542 | "resume", |
544 | report_resume); | 543 | report_resume); |
545 | 544 | ||
546 | return status; | 545 | dev_printk(KERN_DEBUG, &dev->dev, |
546 | "AER driver successfully recovered\n"); | ||
547 | return; | ||
548 | |||
549 | failed: | ||
550 | /* TODO: Should kernel panic here? */ | ||
551 | dev_printk(KERN_DEBUG, &dev->dev, | ||
552 | "AER driver didn't recover\n"); | ||
547 | } | 553 | } |
548 | 554 | ||
549 | /** | 555 | /** |
@@ -558,7 +564,6 @@ static void handle_error_source(struct pcie_device *aerdev, | |||
558 | struct pci_dev *dev, | 564 | struct pci_dev *dev, |
559 | struct aer_err_info *info) | 565 | struct aer_err_info *info) |
560 | { | 566 | { |
561 | pci_ers_result_t status = 0; | ||
562 | int pos; | 567 | int pos; |
563 | 568 | ||
564 | if (info->severity == AER_CORRECTABLE) { | 569 | if (info->severity == AER_CORRECTABLE) { |
@@ -570,114 +575,8 @@ static void handle_error_source(struct pcie_device *aerdev, | |||
570 | if (pos) | 575 | if (pos) |
571 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, | 576 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, |
572 | info->status); | 577 | info->status); |
573 | } else { | 578 | } else |
574 | status = do_recovery(aerdev, dev, info->severity); | 579 | do_recovery(aerdev, dev, info->severity); |
575 | if (status == PCI_ERS_RESULT_RECOVERED) { | ||
576 | dev_printk(KERN_DEBUG, &dev->dev, "AER driver " | ||
577 | "successfully recovered\n"); | ||
578 | } else { | ||
579 | /* TODO: Should kernel panic here? */ | ||
580 | dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't " | ||
581 | "recover\n"); | ||
582 | } | ||
583 | } | ||
584 | } | ||
585 | |||
586 | /** | ||
587 | * aer_enable_rootport - enable Root Port's interrupts when receiving messages | ||
588 | * @rpc: pointer to a Root Port data structure | ||
589 | * | ||
590 | * Invoked when PCIe bus loads AER service driver. | ||
591 | */ | ||
592 | void aer_enable_rootport(struct aer_rpc *rpc) | ||
593 | { | ||
594 | struct pci_dev *pdev = rpc->rpd->port; | ||
595 | int pos, aer_pos; | ||
596 | u16 reg16; | ||
597 | u32 reg32; | ||
598 | |||
599 | pos = pci_pcie_cap(pdev); | ||
600 | /* Clear PCIe Capability's Device Status */ | ||
601 | pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); | ||
602 | pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); | ||
603 | |||
604 | /* Disable system error generation in response to error messages */ | ||
605 | pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, ®16); | ||
606 | reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); | ||
607 | pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); | ||
608 | |||
609 | aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); | ||
610 | /* Clear error status */ | ||
611 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); | ||
612 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); | ||
613 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, ®32); | ||
614 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32); | ||
615 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, ®32); | ||
616 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32); | ||
617 | |||
618 | /* | ||
619 | * Enable error reporting for the root port device and downstream port | ||
620 | * devices. | ||
621 | */ | ||
622 | set_downstream_devices_error_reporting(pdev, true); | ||
623 | |||
624 | /* Enable Root Port's interrupt in response to error messages */ | ||
625 | pci_write_config_dword(pdev, | ||
626 | aer_pos + PCI_ERR_ROOT_COMMAND, | ||
627 | ROOT_PORT_INTR_ON_MESG_MASK); | ||
628 | } | ||
629 | |||
630 | /** | ||
631 | * disable_root_aer - disable Root Port's interrupts when receiving messages | ||
632 | * @rpc: pointer to a Root Port data structure | ||
633 | * | ||
634 | * Invoked when PCIe bus unloads AER service driver. | ||
635 | */ | ||
636 | static void disable_root_aer(struct aer_rpc *rpc) | ||
637 | { | ||
638 | struct pci_dev *pdev = rpc->rpd->port; | ||
639 | u32 reg32; | ||
640 | int pos; | ||
641 | |||
642 | /* | ||
643 | * Disable error reporting for the root port device and downstream port | ||
644 | * devices. | ||
645 | */ | ||
646 | set_downstream_devices_error_reporting(pdev, false); | ||
647 | |||
648 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); | ||
649 | /* Disable Root's interrupt in response to error messages */ | ||
650 | pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); | ||
651 | |||
652 | /* Clear Root's error status reg */ | ||
653 | pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, ®32); | ||
654 | pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32); | ||
655 | } | ||
656 | |||
657 | /** | ||
658 | * get_e_source - retrieve an error source | ||
659 | * @rpc: pointer to the root port which holds an error | ||
660 | * | ||
661 | * Invoked by DPC handler to consume an error. | ||
662 | */ | ||
663 | static struct aer_err_source *get_e_source(struct aer_rpc *rpc) | ||
664 | { | ||
665 | struct aer_err_source *e_source; | ||
666 | unsigned long flags; | ||
667 | |||
668 | /* Lock access to Root error producer/consumer index */ | ||
669 | spin_lock_irqsave(&rpc->e_lock, flags); | ||
670 | if (rpc->prod_idx == rpc->cons_idx) { | ||
671 | spin_unlock_irqrestore(&rpc->e_lock, flags); | ||
672 | return NULL; | ||
673 | } | ||
674 | e_source = &rpc->e_sources[rpc->cons_idx]; | ||
675 | rpc->cons_idx++; | ||
676 | if (rpc->cons_idx == AER_ERROR_SOURCES_MAX) | ||
677 | rpc->cons_idx = 0; | ||
678 | spin_unlock_irqrestore(&rpc->e_lock, flags); | ||
679 | |||
680 | return e_source; | ||
681 | } | 580 | } |
682 | 581 | ||
683 | /** | 582 | /** |
@@ -686,11 +585,14 @@ static struct aer_err_source *get_e_source(struct aer_rpc *rpc) | |||
686 | * @info: pointer to structure to store the error record | 585 | * @info: pointer to structure to store the error record |
687 | * | 586 | * |
688 | * Return 1 on success, 0 on error. | 587 | * Return 1 on success, 0 on error. |
588 | * | ||
589 | * Note that @info is reused among all error devices. Clear fields properly. | ||
689 | */ | 590 | */ |
690 | static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | 591 | static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) |
691 | { | 592 | { |
692 | int pos, temp; | 593 | int pos, temp; |
693 | 594 | ||
595 | /* Must reset in this function */ | ||
694 | info->status = 0; | 596 | info->status = 0; |
695 | info->tlp_header_valid = 0; | 597 | info->tlp_header_valid = 0; |
696 | 598 | ||
@@ -743,12 +645,6 @@ static inline void aer_process_err_devices(struct pcie_device *p_device, | |||
743 | { | 645 | { |
744 | int i; | 646 | int i; |
745 | 647 | ||
746 | if (!e_info->dev[0]) { | ||
747 | dev_printk(KERN_DEBUG, &p_device->port->dev, | ||
748 | "can't find device of ID%04x\n", | ||
749 | e_info->id); | ||
750 | } | ||
751 | |||
752 | /* Report all before handle them, not to lost records by reset etc. */ | 648 | /* Report all before handle them, not to lost records by reset etc. */ |
753 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { | 649 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { |
754 | if (get_device_error_info(e_info->dev[i], e_info)) | 650 | if (get_device_error_info(e_info->dev[i], e_info)) |
@@ -769,11 +665,10 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
769 | struct aer_err_source *e_src) | 665 | struct aer_err_source *e_src) |
770 | { | 666 | { |
771 | struct aer_err_info *e_info; | 667 | struct aer_err_info *e_info; |
772 | int i; | ||
773 | 668 | ||
774 | /* struct aer_err_info might be big, so we allocate it with slab */ | 669 | /* struct aer_err_info might be big, so we allocate it with slab */ |
775 | e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL); | 670 | e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL); |
776 | if (e_info == NULL) { | 671 | if (!e_info) { |
777 | dev_printk(KERN_DEBUG, &p_device->port->dev, | 672 | dev_printk(KERN_DEBUG, &p_device->port->dev, |
778 | "Can't allocate mem when processing AER errors\n"); | 673 | "Can't allocate mem when processing AER errors\n"); |
779 | return; | 674 | return; |
@@ -783,37 +678,73 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
783 | * There is a possibility that both correctable error and | 678 | * There is a possibility that both correctable error and |
784 | * uncorrectable error being logged. Report correctable error first. | 679 | * uncorrectable error being logged. Report correctable error first. |
785 | */ | 680 | */ |
786 | for (i = 1; i & ROOT_ERR_STATUS_MASKS ; i <<= 2) { | 681 | if (e_src->status & PCI_ERR_ROOT_COR_RCV) { |
787 | if (i > 4) | 682 | e_info->id = ERR_COR_ID(e_src->id); |
788 | break; | 683 | e_info->severity = AER_CORRECTABLE; |
789 | if (!(e_src->status & i)) | 684 | |
790 | continue; | 685 | if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV) |
791 | 686 | e_info->multi_error_valid = 1; | |
792 | memset(e_info, 0, sizeof(struct aer_err_info)); | 687 | else |
793 | 688 | e_info->multi_error_valid = 0; | |
794 | /* Init comprehensive error information */ | 689 | |
795 | if (i & PCI_ERR_ROOT_COR_RCV) { | 690 | aer_print_port_info(p_device->port, e_info); |
796 | e_info->id = ERR_COR_ID(e_src->id); | 691 | |
797 | e_info->severity = AER_CORRECTABLE; | 692 | if (find_source_device(p_device->port, e_info)) |
798 | } else { | 693 | aer_process_err_devices(p_device, e_info); |
799 | e_info->id = ERR_UNCOR_ID(e_src->id); | 694 | } |
800 | e_info->severity = ((e_src->status >> 6) & 1); | 695 | |
801 | } | 696 | if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { |
802 | if (e_src->status & | 697 | e_info->id = ERR_UNCOR_ID(e_src->id); |
803 | (PCI_ERR_ROOT_MULTI_COR_RCV | | 698 | |
804 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) | 699 | if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) |
700 | e_info->severity = AER_FATAL; | ||
701 | else | ||
702 | e_info->severity = AER_NONFATAL; | ||
703 | |||
704 | if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV) | ||
805 | e_info->multi_error_valid = 1; | 705 | e_info->multi_error_valid = 1; |
706 | else | ||
707 | e_info->multi_error_valid = 0; | ||
806 | 708 | ||
807 | aer_print_port_info(p_device->port, e_info); | 709 | aer_print_port_info(p_device->port, e_info); |
808 | 710 | ||
809 | find_source_device(p_device->port, e_info); | 711 | if (find_source_device(p_device->port, e_info)) |
810 | aer_process_err_devices(p_device, e_info); | 712 | aer_process_err_devices(p_device, e_info); |
811 | } | 713 | } |
812 | 714 | ||
813 | kfree(e_info); | 715 | kfree(e_info); |
814 | } | 716 | } |
815 | 717 | ||
816 | /** | 718 | /** |
719 | * get_e_source - retrieve an error source | ||
720 | * @rpc: pointer to the root port which holds an error | ||
721 | * @e_src: pointer to store retrieved error source | ||
722 | * | ||
723 | * Return 1 if an error source is retrieved, otherwise 0. | ||
724 | * | ||
725 | * Invoked by DPC handler to consume an error. | ||
726 | */ | ||
727 | static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src) | ||
728 | { | ||
729 | unsigned long flags; | ||
730 | |||
731 | /* Lock access to Root error producer/consumer index */ | ||
732 | spin_lock_irqsave(&rpc->e_lock, flags); | ||
733 | if (rpc->prod_idx == rpc->cons_idx) { | ||
734 | spin_unlock_irqrestore(&rpc->e_lock, flags); | ||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | *e_src = rpc->e_sources[rpc->cons_idx]; | ||
739 | rpc->cons_idx++; | ||
740 | if (rpc->cons_idx == AER_ERROR_SOURCES_MAX) | ||
741 | rpc->cons_idx = 0; | ||
742 | spin_unlock_irqrestore(&rpc->e_lock, flags); | ||
743 | |||
744 | return 1; | ||
745 | } | ||
746 | |||
747 | /** | ||
817 | * aer_isr - consume errors detected by root port | 748 | * aer_isr - consume errors detected by root port |
818 | * @work: definition of this work item | 749 | * @work: definition of this work item |
819 | * | 750 | * |
@@ -823,34 +754,17 @@ void aer_isr(struct work_struct *work) | |||
823 | { | 754 | { |
824 | struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); | 755 | struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); |
825 | struct pcie_device *p_device = rpc->rpd; | 756 | struct pcie_device *p_device = rpc->rpd; |
826 | struct aer_err_source *e_src; | 757 | struct aer_err_source uninitialized_var(e_src); |
827 | 758 | ||
828 | mutex_lock(&rpc->rpc_mutex); | 759 | mutex_lock(&rpc->rpc_mutex); |
829 | e_src = get_e_source(rpc); | 760 | while (get_e_source(rpc, &e_src)) |
830 | while (e_src) { | 761 | aer_isr_one_error(p_device, &e_src); |
831 | aer_isr_one_error(p_device, e_src); | ||
832 | e_src = get_e_source(rpc); | ||
833 | } | ||
834 | mutex_unlock(&rpc->rpc_mutex); | 762 | mutex_unlock(&rpc->rpc_mutex); |
835 | 763 | ||
836 | wake_up(&rpc->wait_release); | 764 | wake_up(&rpc->wait_release); |
837 | } | 765 | } |
838 | 766 | ||
839 | /** | 767 | /** |
840 | * aer_delete_rootport - disable root port aer and delete service data | ||
841 | * @rpc: pointer to a root port device being deleted | ||
842 | * | ||
843 | * Invoked when AER service unloaded on a specific Root Port | ||
844 | */ | ||
845 | void aer_delete_rootport(struct aer_rpc *rpc) | ||
846 | { | ||
847 | /* Disable root port AER itself */ | ||
848 | disable_root_aer(rpc); | ||
849 | |||
850 | kfree(rpc); | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * aer_init - provide AER initialization | 768 | * aer_init - provide AER initialization |
855 | * @dev: pointer to AER pcie device | 769 | * @dev: pointer to AER pcie device |
856 | * | 770 | * |
@@ -858,22 +772,10 @@ void aer_delete_rootport(struct aer_rpc *rpc) | |||
858 | */ | 772 | */ |
859 | int aer_init(struct pcie_device *dev) | 773 | int aer_init(struct pcie_device *dev) |
860 | { | 774 | { |
861 | if (dev->port->aer_firmware_first) { | ||
862 | dev_printk(KERN_DEBUG, &dev->device, | ||
863 | "PCIe errors handled by platform firmware.\n"); | ||
864 | goto out; | ||
865 | } | ||
866 | |||
867 | if (aer_osc_setup(dev)) | ||
868 | goto out; | ||
869 | |||
870 | return 0; | ||
871 | out: | ||
872 | if (forceload) { | 775 | if (forceload) { |
873 | dev_printk(KERN_DEBUG, &dev->device, | 776 | dev_printk(KERN_DEBUG, &dev->device, |
874 | "aerdrv forceload requested.\n"); | 777 | "aerdrv forceload requested.\n"); |
875 | dev->port->aer_firmware_first = 0; | 778 | pcie_aer_force_firmware_first(dev->port, 0); |
876 | return 0; | ||
877 | } | 779 | } |
878 | return -ENXIO; | 780 | return 0; |
879 | } | 781 | } |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index be53d98fa384..3188cd96b338 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -68,7 +68,7 @@ struct pcie_link_state { | |||
68 | struct aspm_latency acceptable[8]; | 68 | struct aspm_latency acceptable[8]; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static int aspm_disabled, aspm_force; | 71 | static int aspm_disabled, aspm_force, aspm_clear_state; |
72 | static DEFINE_MUTEX(aspm_lock); | 72 | static DEFINE_MUTEX(aspm_lock); |
73 | static LIST_HEAD(link_list); | 73 | static LIST_HEAD(link_list); |
74 | 74 | ||
@@ -139,7 +139,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable) | |||
139 | { | 139 | { |
140 | /* Don't enable Clock PM if the link is not Clock PM capable */ | 140 | /* Don't enable Clock PM if the link is not Clock PM capable */ |
141 | if (!link->clkpm_capable && enable) | 141 | if (!link->clkpm_capable && enable) |
142 | return; | 142 | enable = 0; |
143 | /* Need nothing if the specified equals to current state */ | 143 | /* Need nothing if the specified equals to current state */ |
144 | if (link->clkpm_enabled == enable) | 144 | if (link->clkpm_enabled == enable) |
145 | return; | 145 | return; |
@@ -498,6 +498,10 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
498 | struct pci_dev *child; | 498 | struct pci_dev *child; |
499 | int pos; | 499 | int pos; |
500 | u32 reg32; | 500 | u32 reg32; |
501 | |||
502 | if (aspm_clear_state) | ||
503 | return -EINVAL; | ||
504 | |||
501 | /* | 505 | /* |
502 | * Some functions in a slot might not all be PCIe functions, | 506 | * Some functions in a slot might not all be PCIe functions, |
503 | * very strange. Disable ASPM for the whole slot | 507 | * very strange. Disable ASPM for the whole slot |
@@ -563,12 +567,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
563 | struct pcie_link_state *link; | 567 | struct pcie_link_state *link; |
564 | int blacklist = !!pcie_aspm_sanity_check(pdev); | 568 | int blacklist = !!pcie_aspm_sanity_check(pdev); |
565 | 569 | ||
566 | if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state) | 570 | if (!pci_is_pcie(pdev) || pdev->link_state) |
567 | return; | 571 | return; |
568 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 572 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && |
569 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 573 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) |
570 | return; | 574 | return; |
571 | 575 | ||
576 | if (aspm_disabled && !aspm_clear_state) | ||
577 | return; | ||
578 | |||
572 | /* VIA has a strange chipset, root port is under a bridge */ | 579 | /* VIA has a strange chipset, root port is under a bridge */ |
573 | if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && | 580 | if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && |
574 | pdev->bus->self) | 581 | pdev->bus->self) |
@@ -588,11 +595,23 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
588 | * update through pcie_aspm_cap_init(). | 595 | * update through pcie_aspm_cap_init(). |
589 | */ | 596 | */ |
590 | pcie_aspm_cap_init(link, blacklist); | 597 | pcie_aspm_cap_init(link, blacklist); |
591 | pcie_config_aspm_path(link); | ||
592 | 598 | ||
593 | /* Setup initial Clock PM state */ | 599 | /* Setup initial Clock PM state */ |
594 | pcie_clkpm_cap_init(link, blacklist); | 600 | pcie_clkpm_cap_init(link, blacklist); |
595 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); | 601 | |
602 | /* | ||
603 | * At this stage drivers haven't had an opportunity to change the | ||
604 | * link policy setting. Enabling ASPM on broken hardware can cripple | ||
605 | * it even before the driver has had a chance to disable ASPM, so | ||
606 | * default to a safe level right now. If we're enabling ASPM beyond | ||
607 | * the BIOS's expectation, we'll do so once pci_enable_device() is | ||
608 | * called. | ||
609 | */ | ||
610 | if (aspm_policy != POLICY_POWERSAVE) { | ||
611 | pcie_config_aspm_path(link); | ||
612 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); | ||
613 | } | ||
614 | |||
596 | unlock: | 615 | unlock: |
597 | mutex_unlock(&aspm_lock); | 616 | mutex_unlock(&aspm_lock); |
598 | out: | 617 | out: |
@@ -629,7 +648,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
629 | struct pci_dev *parent = pdev->bus->self; | 648 | struct pci_dev *parent = pdev->bus->self; |
630 | struct pcie_link_state *link, *root, *parent_link; | 649 | struct pcie_link_state *link, *root, *parent_link; |
631 | 650 | ||
632 | if (aspm_disabled || !pci_is_pcie(pdev) || | 651 | if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) || |
633 | !parent || !parent->link_state) | 652 | !parent || !parent->link_state) |
634 | return; | 653 | return; |
635 | if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && | 654 | if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
@@ -887,6 +906,12 @@ static int __init pcie_aspm_disable(char *str) | |||
887 | 906 | ||
888 | __setup("pcie_aspm=", pcie_aspm_disable); | 907 | __setup("pcie_aspm=", pcie_aspm_disable); |
889 | 908 | ||
909 | void pcie_clear_aspm(void) | ||
910 | { | ||
911 | if (!aspm_force) | ||
912 | aspm_clear_state = 1; | ||
913 | } | ||
914 | |||
890 | void pcie_no_aspm(void) | 915 | void pcie_no_aspm(void) |
891 | { | 916 | { |
892 | if (!aspm_force) | 917 | if (!aspm_force) |
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c new file mode 100644 index 000000000000..0057344a3fcb --- /dev/null +++ b/drivers/pci/pcie/pme.c | |||
@@ -0,0 +1,443 @@ | |||
1 | /* | ||
2 | * PCIe Native PME support | ||
3 | * | ||
4 | * Copyright (C) 2007 - 2009 Intel Corp | ||
5 | * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com> | ||
6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License V2. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/pcieport_if.h> | ||
22 | #include <linux/acpi.h> | ||
23 | #include <linux/pci-acpi.h> | ||
24 | #include <linux/pm_runtime.h> | ||
25 | |||
26 | #include "../pci.h" | ||
27 | #include "portdrv.h" | ||
28 | |||
29 | /* | ||
30 | * If this switch is set, MSI will not be used for PCIe PME signaling. This | ||
31 | * causes the PCIe port driver to use INTx interrupts only, but it turns out | ||
32 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based | ||
33 | * wake-up from system sleep states. | ||
34 | */ | ||
35 | bool pcie_pme_msi_disabled; | ||
36 | |||
37 | static int __init pcie_pme_setup(char *str) | ||
38 | { | ||
39 | if (!strncmp(str, "nomsi", 5)) | ||
40 | pcie_pme_msi_disabled = true; | ||
41 | |||
42 | return 1; | ||
43 | } | ||
44 | __setup("pcie_pme=", pcie_pme_setup); | ||
45 | |||
46 | struct pcie_pme_service_data { | ||
47 | spinlock_t lock; | ||
48 | struct pcie_device *srv; | ||
49 | struct work_struct work; | ||
50 | bool noirq; /* Don't enable the PME interrupt used by this service. */ | ||
51 | }; | ||
52 | |||
53 | /** | ||
54 | * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation. | ||
55 | * @dev: PCIe root port or event collector. | ||
56 | * @enable: Enable or disable the interrupt. | ||
57 | */ | ||
58 | void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) | ||
59 | { | ||
60 | int rtctl_pos; | ||
61 | u16 rtctl; | ||
62 | |||
63 | rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; | ||
64 | |||
65 | pci_read_config_word(dev, rtctl_pos, &rtctl); | ||
66 | if (enable) | ||
67 | rtctl |= PCI_EXP_RTCTL_PMEIE; | ||
68 | else | ||
69 | rtctl &= ~PCI_EXP_RTCTL_PMEIE; | ||
70 | pci_write_config_word(dev, rtctl_pos, rtctl); | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. | ||
75 | * @bus: PCI bus to scan. | ||
76 | * | ||
77 | * Scan given PCI bus and all buses under it for devices asserting PME#. | ||
78 | */ | ||
79 | static bool pcie_pme_walk_bus(struct pci_bus *bus) | ||
80 | { | ||
81 | struct pci_dev *dev; | ||
82 | bool ret = false; | ||
83 | |||
84 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
85 | /* Skip PCIe devices in case we started from a root port. */ | ||
86 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { | ||
87 | pci_wakeup_event(dev); | ||
88 | pm_request_resume(&dev->dev); | ||
89 | ret = true; | ||
90 | } | ||
91 | |||
92 | if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate)) | ||
93 | ret = true; | ||
94 | } | ||
95 | |||
96 | return ret; | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME. | ||
101 | * @bus: Secondary bus of the bridge. | ||
102 | * @devfn: Device/function number to check. | ||
103 | * | ||
104 | * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band | ||
105 | * PCIe PME message. In such that case the bridge should use the Requester ID | ||
106 | * of device/function number 0 on its secondary bus. | ||
107 | */ | ||
108 | static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn) | ||
109 | { | ||
110 | struct pci_dev *dev; | ||
111 | bool found = false; | ||
112 | |||
113 | if (devfn) | ||
114 | return false; | ||
115 | |||
116 | dev = pci_dev_get(bus->self); | ||
117 | if (!dev) | ||
118 | return false; | ||
119 | |||
120 | if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | ||
121 | down_read(&pci_bus_sem); | ||
122 | if (pcie_pme_walk_bus(bus)) | ||
123 | found = true; | ||
124 | up_read(&pci_bus_sem); | ||
125 | } | ||
126 | |||
127 | pci_dev_put(dev); | ||
128 | return found; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * pcie_pme_handle_request - Find device that generated PME and handle it. | ||
133 | * @port: Root port or event collector that generated the PME interrupt. | ||
134 | * @req_id: PCIe Requester ID of the device that generated the PME. | ||
135 | */ | ||
136 | static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | ||
137 | { | ||
138 | u8 busnr = req_id >> 8, devfn = req_id & 0xff; | ||
139 | struct pci_bus *bus; | ||
140 | struct pci_dev *dev; | ||
141 | bool found = false; | ||
142 | |||
143 | /* First, check if the PME is from the root port itself. */ | ||
144 | if (port->devfn == devfn && port->bus->number == busnr) { | ||
145 | if (pci_check_pme_status(port)) { | ||
146 | pm_request_resume(&port->dev); | ||
147 | found = true; | ||
148 | } else { | ||
149 | /* | ||
150 | * Apparently, the root port generated the PME on behalf | ||
151 | * of a non-PCIe device downstream. If this is done by | ||
152 | * a root port, the Requester ID field in its status | ||
153 | * register may contain either the root port's, or the | ||
154 | * source device's information (PCI Express Base | ||
155 | * Specification, Rev. 2.0, Section 6.1.9). | ||
156 | */ | ||
157 | down_read(&pci_bus_sem); | ||
158 | found = pcie_pme_walk_bus(port->subordinate); | ||
159 | up_read(&pci_bus_sem); | ||
160 | } | ||
161 | goto out; | ||
162 | } | ||
163 | |||
164 | /* Second, find the bus the source device is on. */ | ||
165 | bus = pci_find_bus(pci_domain_nr(port->bus), busnr); | ||
166 | if (!bus) | ||
167 | goto out; | ||
168 | |||
169 | /* Next, check if the PME is from a PCIe-PCI bridge. */ | ||
170 | found = pcie_pme_from_pci_bridge(bus, devfn); | ||
171 | if (found) | ||
172 | goto out; | ||
173 | |||
174 | /* Finally, try to find the PME source on the bus. */ | ||
175 | down_read(&pci_bus_sem); | ||
176 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
177 | pci_dev_get(dev); | ||
178 | if (dev->devfn == devfn) { | ||
179 | found = true; | ||
180 | break; | ||
181 | } | ||
182 | pci_dev_put(dev); | ||
183 | } | ||
184 | up_read(&pci_bus_sem); | ||
185 | |||
186 | if (found) { | ||
187 | /* The device is there, but we have to check its PME status. */ | ||
188 | found = pci_check_pme_status(dev); | ||
189 | if (found) { | ||
190 | pci_wakeup_event(dev); | ||
191 | pm_request_resume(&dev->dev); | ||
192 | } | ||
193 | pci_dev_put(dev); | ||
194 | } else if (devfn) { | ||
195 | /* | ||
196 | * The device is not there, but we can still try to recover by | ||
197 | * assuming that the PME was reported by a PCIe-PCI bridge that | ||
198 | * used devfn different from zero. | ||
199 | */ | ||
200 | dev_dbg(&port->dev, "PME interrupt generated for " | ||
201 | "non-existent device %02x:%02x.%d\n", | ||
202 | busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
203 | found = pcie_pme_from_pci_bridge(bus, 0); | ||
204 | } | ||
205 | |||
206 | out: | ||
207 | if (!found) | ||
208 | dev_dbg(&port->dev, "Spurious native PME interrupt!\n"); | ||
209 | } | ||
210 | |||
/**
 * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
 * @work: Work structure giving access to service data.
 *
 * Drain the port's Root Status register: dispatch every PME whose status
 * bit is set, and keep polling while PME Pending is set.  Stops early if
 * ->noirq indicates the service is suspending.
 */
static void pcie_pme_work_fn(struct work_struct *work)
{
	struct pcie_pme_service_data *data =
			container_of(work, struct pcie_pme_service_data, work);
	struct pci_dev *port = data->srv->port;
	int rtsta_pos;
	u32 rtsta;

	rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;

	spin_lock_irq(&data->lock);

	for (;;) {
		/* Bail out if pcie_pme_suspend() shut the interrupt off. */
		if (data->noirq)
			break;

		pci_read_config_dword(port, rtsta_pos, &rtsta);
		if (rtsta & PCI_EXP_RTSTA_PME) {
			/*
			 * Clear PME status of the port. If there are other
			 * pending PMEs, the status will be set again.
			 */
			pcie_clear_root_pme_status(port);

			/*
			 * Drop the lock around the handler: it identifies the
			 * PME source from the low 16 bits of Root Status and
			 * takes sleeping locks (pci_bus_sem) while walking the
			 * bus, so it must not run under our spinlock.
			 */
			spin_unlock_irq(&data->lock);
			pcie_pme_handle_request(port, rtsta & 0xffff);
			spin_lock_irq(&data->lock);

			continue;
		}

		/* No need to loop if there are no more PMEs pending. */
		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
			break;

		/* PME Pending still set: briefly yield and re-read status. */
		spin_unlock_irq(&data->lock);
		cpu_relax();
		spin_lock_irq(&data->lock);
	}

	/* Re-arm the interrupt unless the service is suspending. */
	if (!data->noirq)
		pcie_pme_interrupt_enable(port, true);

	spin_unlock_irq(&data->lock);
}
260 | |||
261 | /** | ||
262 | * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt. | ||
263 | * @irq: Interrupt vector. | ||
264 | * @context: Interrupt context pointer. | ||
265 | */ | ||
266 | static irqreturn_t pcie_pme_irq(int irq, void *context) | ||
267 | { | ||
268 | struct pci_dev *port; | ||
269 | struct pcie_pme_service_data *data; | ||
270 | int rtsta_pos; | ||
271 | u32 rtsta; | ||
272 | unsigned long flags; | ||
273 | |||
274 | port = ((struct pcie_device *)context)->port; | ||
275 | data = get_service_data((struct pcie_device *)context); | ||
276 | |||
277 | rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; | ||
278 | |||
279 | spin_lock_irqsave(&data->lock, flags); | ||
280 | pci_read_config_dword(port, rtsta_pos, &rtsta); | ||
281 | |||
282 | if (!(rtsta & PCI_EXP_RTSTA_PME)) { | ||
283 | spin_unlock_irqrestore(&data->lock, flags); | ||
284 | return IRQ_NONE; | ||
285 | } | ||
286 | |||
287 | pcie_pme_interrupt_enable(port, false); | ||
288 | spin_unlock_irqrestore(&data->lock, flags); | ||
289 | |||
290 | /* We don't use pm_wq, because it's freezable. */ | ||
291 | schedule_work(&data->work); | ||
292 | |||
293 | return IRQ_HANDLED; | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * pcie_pme_set_native - Set the PME interrupt flag for given device. | ||
298 | * @dev: PCI device to handle. | ||
299 | * @ign: Ignored. | ||
300 | */ | ||
301 | static int pcie_pme_set_native(struct pci_dev *dev, void *ign) | ||
302 | { | ||
303 | dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n"); | ||
304 | |||
305 | device_set_run_wake(&dev->dev, true); | ||
306 | dev->pme_interrupt = true; | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port. | ||
312 | * @port: PCIe root port or event collector to handle. | ||
313 | * | ||
314 | * For each device below given root port, including the port itself (or for each | ||
315 | * root complex integrated endpoint if @port is a root complex event collector) | ||
316 | * set the flag indicating that it can signal run-time wake-up events via PCIe | ||
317 | * PME interrupts. | ||
318 | */ | ||
319 | static void pcie_pme_mark_devices(struct pci_dev *port) | ||
320 | { | ||
321 | pcie_pme_set_native(port, NULL); | ||
322 | if (port->subordinate) { | ||
323 | pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); | ||
324 | } else { | ||
325 | struct pci_bus *bus = port->bus; | ||
326 | struct pci_dev *dev; | ||
327 | |||
328 | /* Check if this is a root port event collector. */ | ||
329 | if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) | ||
330 | return; | ||
331 | |||
332 | down_read(&pci_bus_sem); | ||
333 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
334 | if (pci_is_pcie(dev) | ||
335 | && dev->pcie_type == PCI_EXP_TYPE_RC_END) | ||
336 | pcie_pme_set_native(dev, NULL); | ||
337 | up_read(&pci_bus_sem); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * pcie_pme_probe - Initialize PCIe PME service for given root port. | ||
343 | * @srv: PCIe service to initialize. | ||
344 | */ | ||
345 | static int pcie_pme_probe(struct pcie_device *srv) | ||
346 | { | ||
347 | struct pci_dev *port; | ||
348 | struct pcie_pme_service_data *data; | ||
349 | int ret; | ||
350 | |||
351 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
352 | if (!data) | ||
353 | return -ENOMEM; | ||
354 | |||
355 | spin_lock_init(&data->lock); | ||
356 | INIT_WORK(&data->work, pcie_pme_work_fn); | ||
357 | data->srv = srv; | ||
358 | set_service_data(srv, data); | ||
359 | |||
360 | port = srv->port; | ||
361 | pcie_pme_interrupt_enable(port, false); | ||
362 | pcie_clear_root_pme_status(port); | ||
363 | |||
364 | ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); | ||
365 | if (ret) { | ||
366 | kfree(data); | ||
367 | } else { | ||
368 | pcie_pme_mark_devices(port); | ||
369 | pcie_pme_interrupt_enable(port, true); | ||
370 | } | ||
371 | |||
372 | return ret; | ||
373 | } | ||
374 | |||
/**
 * pcie_pme_suspend - Suspend PCIe PME service device.
 * @srv: PCIe service device to suspend.
 *
 * Mask the port's PME interrupt, clear stale PME status and set ->noirq
 * (under the lock, so pcie_pme_work_fn() observes it and stops touching
 * the hardware), then wait for an in-flight interrupt handler to finish.
 */
static int pcie_pme_suspend(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);
	struct pci_dev *port = srv->port;

	spin_lock_irq(&data->lock);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);
	data->noirq = true;
	spin_unlock_irq(&data->lock);

	/* Make sure pcie_pme_irq() is not running before returning. */
	synchronize_irq(srv->irq);

	return 0;
}
394 | |||
/**
 * pcie_pme_resume - Resume PCIe PME service device.
 * @srv: PCIe service device to resume.
 *
 * Clear ->noirq so the work function may run again, discard any PME
 * status accumulated while suspended and unmask the PME interrupt.
 */
static int pcie_pme_resume(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);
	struct pci_dev *port = srv->port;

	spin_lock_irq(&data->lock);
	data->noirq = false;
	pcie_clear_root_pme_status(port);
	pcie_pme_interrupt_enable(port, true);
	spin_unlock_irq(&data->lock);

	return 0;
}
412 | |||
/**
 * pcie_pme_remove - Prepare PCIe PME service device for removal.
 * @srv: PCIe service device to remove.
 *
 * Quiesce the port (reusing the suspend path), release the interrupt
 * line and free the service data allocated by pcie_pme_probe().
 */
static void pcie_pme_remove(struct pcie_device *srv)
{
	pcie_pme_suspend(srv);
	free_irq(srv->irq, srv);
	kfree(get_service_data(srv));
}
423 | |||
/*
 * PME service driver: bound by the port driver core to PCIe root ports
 * only (see .port_type), providing the PCIE_PORT_SERVICE_PME service.
 */
static struct pcie_port_service_driver pcie_pme_driver = {
	.name = "pcie_pme",
	.port_type = PCI_EXP_TYPE_ROOT_PORT,
	.service = PCIE_PORT_SERVICE_PME,

	.probe = pcie_pme_probe,
	.suspend = pcie_pme_suspend,
	.resume = pcie_pme_resume,
	.remove = pcie_pme_remove,
};
434 | |||
/**
 * pcie_pme_service_init - Register the PCIe PME service driver.
 *
 * Returns the result of pcie_port_service_register(), which fails with
 * -ENODEV when port services are disabled (e.g. "pcie_ports=compat").
 */
static int __init pcie_pme_service_init(void)
{
	return pcie_port_service_register(&pcie_pme_driver);
}

module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index aaeb9d21cba5..bd00a01aef14 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
@@ -30,4 +30,42 @@ extern void pcie_port_device_remove(struct pci_dev *dev); | |||
30 | extern int __must_check pcie_port_bus_register(void); | 30 | extern int __must_check pcie_port_bus_register(void); |
31 | extern void pcie_port_bus_unregister(void); | 31 | extern void pcie_port_bus_unregister(void); |
32 | 32 | ||
33 | struct pci_dev; | ||
34 | |||
35 | extern void pcie_clear_root_pme_status(struct pci_dev *dev); | ||
36 | |||
37 | #ifdef CONFIG_PCIE_PME | ||
38 | extern bool pcie_pme_msi_disabled; | ||
39 | |||
40 | static inline void pcie_pme_disable_msi(void) | ||
41 | { | ||
42 | pcie_pme_msi_disabled = true; | ||
43 | } | ||
44 | |||
45 | static inline bool pcie_pme_no_msi(void) | ||
46 | { | ||
47 | return pcie_pme_msi_disabled; | ||
48 | } | ||
49 | |||
50 | extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable); | ||
51 | #else /* !CONFIG_PCIE_PME */ | ||
52 | static inline void pcie_pme_disable_msi(void) {} | ||
53 | static inline bool pcie_pme_no_msi(void) { return false; } | ||
54 | static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {} | ||
55 | #endif /* !CONFIG_PCIE_PME */ | ||
56 | |||
57 | #ifdef CONFIG_ACPI | ||
58 | extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask); | ||
59 | |||
60 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | ||
61 | { | ||
62 | return pcie_port_acpi_setup(port, mask); | ||
63 | } | ||
64 | #else /* !CONFIG_ACPI */ | ||
65 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | #endif /* !CONFIG_ACPI */ | ||
70 | |||
33 | #endif /* _PORTDRV_H_ */ | 71 | #endif /* _PORTDRV_H_ */ |
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c new file mode 100644 index 000000000000..a86b56e5f2f2 --- /dev/null +++ b/drivers/pci/pcie/portdrv_acpi.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * PCIe Port Native Services Support, ACPI-Related Part | ||
3 | * | ||
4 | * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/pci-acpi.h> | ||
16 | #include <linux/pcieport_if.h> | ||
17 | |||
18 | #include "aer/aerdrv.h" | ||
19 | #include "../pci.h" | ||
20 | |||
21 | /** | ||
22 | * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services. | ||
23 | * @port: PCIe Port service for a root port or event collector. | ||
24 | * @srv_mask: Bit mask of services that can be enabled for @port. | ||
25 | * | ||
26 | * Invoked when @port is identified as a PCIe port device. To avoid conflicts | ||
27 | * with the BIOS PCIe port native services support requires the BIOS to yield | ||
28 | * control of these services to the kernel. The mask of services that the BIOS | ||
29 | * allows to be enabled for @port is written to @srv_mask. | ||
30 | * | ||
31 | * NOTE: It turns out that we cannot do that for individual port services | ||
32 | * separately, because that would make some systems work incorrectly. | ||
33 | */ | ||
34 | int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) | ||
35 | { | ||
36 | struct acpi_pci_root *root; | ||
37 | acpi_handle handle; | ||
38 | u32 flags; | ||
39 | |||
40 | if (acpi_pci_disabled) | ||
41 | return 0; | ||
42 | |||
43 | handle = acpi_find_root_bridge_handle(port); | ||
44 | if (!handle) | ||
45 | return -EINVAL; | ||
46 | |||
47 | root = acpi_pci_find_root(handle); | ||
48 | if (!root) | ||
49 | return -ENODEV; | ||
50 | |||
51 | flags = root->osc_control_set; | ||
52 | |||
53 | *srv_mask = PCIE_PORT_SERVICE_VC; | ||
54 | if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) | ||
55 | *srv_mask |= PCIE_PORT_SERVICE_HP; | ||
56 | if (flags & OSC_PCI_EXPRESS_PME_CONTROL) | ||
57 | *srv_mask |= PCIE_PORT_SERVICE_PME; | ||
58 | if (flags & OSC_PCI_EXPRESS_AER_CONTROL) | ||
59 | *srv_mask |= PCIE_PORT_SERVICE_AER; | ||
60 | |||
61 | return 0; | ||
62 | } | ||
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index b174188ac121..5130d0d22390 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
17 | #include <linux/aer.h> | ||
18 | #include <linux/pci-aspm.h> | ||
17 | 19 | ||
18 | #include "../pci.h" | 20 | #include "../pci.h" |
19 | #include "portdrv.h" | 21 | #include "portdrv.h" |
@@ -186,16 +188,24 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
186 | */ | 188 | */ |
187 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) | 189 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) |
188 | { | 190 | { |
189 | int i, irq; | 191 | int i, irq = -1; |
192 | |||
193 | /* We have to use INTx if MSI cannot be used for PCIe PME. */ | ||
194 | if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) { | ||
195 | if (dev->pin) | ||
196 | irq = dev->irq; | ||
197 | goto no_msi; | ||
198 | } | ||
190 | 199 | ||
191 | /* Try to use MSI-X if supported */ | 200 | /* Try to use MSI-X if supported */ |
192 | if (!pcie_port_enable_msix(dev, irqs, mask)) | 201 | if (!pcie_port_enable_msix(dev, irqs, mask)) |
193 | return 0; | 202 | return 0; |
203 | |||
194 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ | 204 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ |
195 | irq = -1; | ||
196 | if (!pci_enable_msi(dev) || dev->pin) | 205 | if (!pci_enable_msi(dev) || dev->pin) |
197 | irq = dev->irq; | 206 | irq = dev->irq; |
198 | 207 | ||
208 | no_msi: | ||
199 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) | 209 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) |
200 | irqs[i] = irq; | 210 | irqs[i] = irq; |
201 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; | 211 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; |
@@ -228,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
228 | int services = 0, pos; | 238 | int services = 0, pos; |
229 | u16 reg16; | 239 | u16 reg16; |
230 | u32 reg32; | 240 | u32 reg32; |
241 | int cap_mask; | ||
242 | int err; | ||
243 | |||
244 | if (pcie_ports_disabled) | ||
245 | return 0; | ||
246 | |||
247 | err = pcie_port_platform_notify(dev, &cap_mask); | ||
248 | if (!pcie_ports_auto) { | ||
249 | cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | ||
250 | | PCIE_PORT_SERVICE_VC; | ||
251 | if (pci_aer_available()) | ||
252 | cap_mask |= PCIE_PORT_SERVICE_AER; | ||
253 | } else if (err) { | ||
254 | return 0; | ||
255 | } | ||
231 | 256 | ||
232 | pos = pci_pcie_cap(dev); | 257 | pos = pci_pcie_cap(dev); |
233 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); | 258 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); |
234 | /* Hot-Plug Capable */ | 259 | /* Hot-Plug Capable */ |
235 | if (reg16 & PCI_EXP_FLAGS_SLOT) { | 260 | if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { |
236 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); | 261 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); |
237 | if (reg32 & PCI_EXP_SLTCAP_HPC) | 262 | if (reg32 & PCI_EXP_SLTCAP_HPC) { |
238 | services |= PCIE_PORT_SERVICE_HP; | 263 | services |= PCIE_PORT_SERVICE_HP; |
264 | /* | ||
265 | * Disable hot-plug interrupts in case they have been | ||
266 | * enabled by the BIOS and the hot-plug service driver | ||
267 | * is not loaded. | ||
268 | */ | ||
269 | pos += PCI_EXP_SLTCTL; | ||
270 | pci_read_config_word(dev, pos, ®16); | ||
271 | reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); | ||
272 | pci_write_config_word(dev, pos, reg16); | ||
273 | } | ||
239 | } | 274 | } |
240 | /* AER capable */ | 275 | /* AER capable */ |
241 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) | 276 | if ((cap_mask & PCIE_PORT_SERVICE_AER) |
277 | && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) { | ||
242 | services |= PCIE_PORT_SERVICE_AER; | 278 | services |= PCIE_PORT_SERVICE_AER; |
279 | /* | ||
280 | * Disable AER on this port in case it's been enabled by the | ||
281 | * BIOS (the AER service driver will enable it when necessary). | ||
282 | */ | ||
283 | pci_disable_pcie_error_reporting(dev); | ||
284 | } | ||
243 | /* VC support */ | 285 | /* VC support */ |
244 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) | 286 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) |
245 | services |= PCIE_PORT_SERVICE_VC; | 287 | services |= PCIE_PORT_SERVICE_VC; |
246 | /* Root ports are capable of generating PME too */ | 288 | /* Root ports are capable of generating PME too */ |
247 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | 289 | if ((cap_mask & PCIE_PORT_SERVICE_PME) |
290 | && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { | ||
248 | services |= PCIE_PORT_SERVICE_PME; | 291 | services |= PCIE_PORT_SERVICE_PME; |
292 | /* | ||
293 | * Disable PME interrupt on this port in case it's been enabled | ||
294 | * by the BIOS (the PME service driver will enable it when | ||
295 | * necessary). | ||
296 | */ | ||
297 | pcie_pme_interrupt_enable(dev, false); | ||
298 | } | ||
249 | 299 | ||
250 | return services; | 300 | return services; |
251 | } | 301 | } |
@@ -277,6 +327,7 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) | |||
277 | pci_name(pdev), | 327 | pci_name(pdev), |
278 | get_descriptor_id(pdev->pcie_type, service)); | 328 | get_descriptor_id(pdev->pcie_type, service)); |
279 | device->parent = &pdev->dev; | 329 | device->parent = &pdev->dev; |
330 | device_enable_async_suspend(device); | ||
280 | 331 | ||
281 | retval = device_register(device); | 332 | retval = device_register(device); |
282 | if (retval) | 333 | if (retval) |
@@ -298,15 +349,18 @@ int pcie_port_device_register(struct pci_dev *dev) | |||
298 | int status, capabilities, i, nr_service; | 349 | int status, capabilities, i, nr_service; |
299 | int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; | 350 | int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; |
300 | 351 | ||
301 | /* Get and check PCI Express port services */ | ||
302 | capabilities = get_port_device_capability(dev); | ||
303 | if (!capabilities) | ||
304 | return -ENODEV; | ||
305 | |||
306 | /* Enable PCI Express port device */ | 352 | /* Enable PCI Express port device */ |
307 | status = pci_enable_device(dev); | 353 | status = pci_enable_device(dev); |
308 | if (status) | 354 | if (status) |
309 | return status; | 355 | return status; |
356 | |||
357 | /* Get and check PCI Express port services */ | ||
358 | capabilities = get_port_device_capability(dev); | ||
359 | if (!capabilities) { | ||
360 | pcie_no_aspm(); | ||
361 | return 0; | ||
362 | } | ||
363 | |||
310 | pci_set_master(dev); | 364 | pci_set_master(dev); |
311 | /* | 365 | /* |
312 | * Initialize service irqs. Don't use service devices that | 366 | * Initialize service irqs. Don't use service devices that |
@@ -485,6 +539,9 @@ static void pcie_port_shutdown_service(struct device *dev) {} | |||
485 | */ | 539 | */ |
486 | int pcie_port_service_register(struct pcie_port_service_driver *new) | 540 | int pcie_port_service_register(struct pcie_port_service_driver *new) |
487 | { | 541 | { |
542 | if (pcie_ports_disabled) | ||
543 | return -ENODEV; | ||
544 | |||
488 | new->driver.name = (char *)new->name; | 545 | new->driver.name = (char *)new->name; |
489 | new->driver.bus = &pcie_port_bus_type; | 546 | new->driver.bus = &pcie_port_bus_type; |
490 | new->driver.probe = pcie_port_probe_service; | 547 | new->driver.probe = pcie_port_probe_service; |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 13c8972886e6..e0610bda1dea 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -12,9 +12,10 @@ | |||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/pm.h> | 13 | #include <linux/pm.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/slab.h> | ||
16 | #include <linux/pcieport_if.h> | 15 | #include <linux/pcieport_if.h> |
17 | #include <linux/aer.h> | 16 | #include <linux/aer.h> |
17 | #include <linux/dmi.h> | ||
18 | #include <linux/pci-aspm.h> | ||
18 | 19 | ||
19 | #include "portdrv.h" | 20 | #include "portdrv.h" |
20 | #include "aer/aerdrv.h" | 21 | #include "aer/aerdrv.h" |
@@ -29,8 +30,49 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
29 | MODULE_DESCRIPTION(DRIVER_DESC); | 30 | MODULE_DESCRIPTION(DRIVER_DESC); |
30 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
31 | 32 | ||
33 | /* If this switch is set, PCIe port native services should not be enabled. */ | ||
34 | bool pcie_ports_disabled; | ||
35 | |||
36 | /* | ||
37 | * If this switch is set, ACPI _OSC will be used to determine whether or not to | ||
38 | * enable PCIe port native services. | ||
39 | */ | ||
40 | bool pcie_ports_auto = true; | ||
41 | |||
42 | static int __init pcie_port_setup(char *str) | ||
43 | { | ||
44 | if (!strncmp(str, "compat", 6)) { | ||
45 | pcie_ports_disabled = true; | ||
46 | } else if (!strncmp(str, "native", 6)) { | ||
47 | pcie_ports_disabled = false; | ||
48 | pcie_ports_auto = false; | ||
49 | } else if (!strncmp(str, "auto", 4)) { | ||
50 | pcie_ports_disabled = false; | ||
51 | pcie_ports_auto = true; | ||
52 | } | ||
53 | |||
54 | return 1; | ||
55 | } | ||
56 | __setup("pcie_ports=", pcie_port_setup); | ||
57 | |||
32 | /* global data */ | 58 | /* global data */ |
33 | 59 | ||
60 | /** | ||
61 | * pcie_clear_root_pme_status - Clear root port PME interrupt status. | ||
62 | * @dev: PCIe root port or event collector. | ||
63 | */ | ||
64 | void pcie_clear_root_pme_status(struct pci_dev *dev) | ||
65 | { | ||
66 | int rtsta_pos; | ||
67 | u32 rtsta; | ||
68 | |||
69 | rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; | ||
70 | |||
71 | pci_read_config_dword(dev, rtsta_pos, &rtsta); | ||
72 | rtsta |= PCI_EXP_RTSTA_PME; | ||
73 | pci_write_config_dword(dev, rtsta_pos, rtsta); | ||
74 | } | ||
75 | |||
34 | static int pcie_portdrv_restore_config(struct pci_dev *dev) | 76 | static int pcie_portdrv_restore_config(struct pci_dev *dev) |
35 | { | 77 | { |
36 | int retval; | 78 | int retval; |
@@ -43,6 +85,20 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) | |||
43 | } | 85 | } |
44 | 86 | ||
45 | #ifdef CONFIG_PM | 87 | #ifdef CONFIG_PM |
88 | static int pcie_port_resume_noirq(struct device *dev) | ||
89 | { | ||
90 | struct pci_dev *pdev = to_pci_dev(dev); | ||
91 | |||
92 | /* | ||
93 | * Some BIOSes forget to clear Root PME Status bits after system wakeup | ||
94 | * which breaks ACPI-based runtime wakeup on PCI Express, so clear those | ||
95 | * bits now just in case (shouldn't hurt). | ||
96 | */ | ||
97 | if(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | ||
98 | pcie_clear_root_pme_status(pdev); | ||
99 | return 0; | ||
100 | } | ||
101 | |||
46 | static const struct dev_pm_ops pcie_portdrv_pm_ops = { | 102 | static const struct dev_pm_ops pcie_portdrv_pm_ops = { |
47 | .suspend = pcie_port_device_suspend, | 103 | .suspend = pcie_port_device_suspend, |
48 | .resume = pcie_port_device_resume, | 104 | .resume = pcie_port_device_resume, |
@@ -50,6 +106,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
50 | .thaw = pcie_port_device_resume, | 106 | .thaw = pcie_port_device_resume, |
51 | .poweroff = pcie_port_device_suspend, | 107 | .poweroff = pcie_port_device_suspend, |
52 | .restore = pcie_port_device_resume, | 108 | .restore = pcie_port_device_resume, |
109 | .resume_noirq = pcie_port_resume_noirq, | ||
53 | }; | 110 | }; |
54 | 111 | ||
55 | #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) | 112 | #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) |
@@ -273,10 +330,39 @@ static struct pci_driver pcie_portdriver = { | |||
273 | .driver.pm = PCIE_PORTDRV_PM_OPS, | 330 | .driver.pm = PCIE_PORTDRV_PM_OPS, |
274 | }; | 331 | }; |
275 | 332 | ||
333 | static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) | ||
334 | { | ||
335 | pr_notice("%s detected: will not use MSI for PCIe PME signaling\n", | ||
336 | d->ident); | ||
337 | pcie_pme_disable_msi(); | ||
338 | return 0; | ||
339 | } | ||
340 | |||
/*
 * Boxes that should not use MSI for PCIe PME signaling; a DMI match
 * invokes dmi_pcie_pme_disable_msi() during driver initialization.
 */
static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
	{
		.callback = dmi_pcie_pme_disable_msi,
		.ident = "MSI Wind U-100",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
					"MICRO-STAR INTERNATIONAL CO., LTD"),
			DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
		},
	},
	{}	/* terminating entry */
};
356 | |||
276 | static int __init pcie_portdrv_init(void) | 357 | static int __init pcie_portdrv_init(void) |
277 | { | 358 | { |
278 | int retval; | 359 | int retval; |
279 | 360 | ||
361 | if (pcie_ports_disabled) | ||
362 | return pci_register_driver(&pcie_portdriver); | ||
363 | |||
364 | dmi_check_system(pcie_portdrv_dmi_table); | ||
365 | |||
280 | retval = pcie_port_bus_register(); | 366 | retval = pcie_port_bus_register(); |
281 | if (retval) { | 367 | if (retval) { |
282 | printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval); | 368 | printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval); |
@@ -289,11 +375,4 @@ static int __init pcie_portdrv_init(void) | |||
289 | return retval; | 375 | return retval; |
290 | } | 376 | } |
291 | 377 | ||
292 | static void __exit pcie_portdrv_exit(void) | ||
293 | { | ||
294 | pci_unregister_driver(&pcie_portdriver); | ||
295 | pcie_port_bus_unregister(); | ||
296 | } | ||
297 | |||
298 | module_init(pcie_portdrv_init); | 378 | module_init(pcie_portdrv_init); |
299 | module_exit(pcie_portdrv_exit); | ||
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 98ffb2de22e9..c84900da3c59 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/cpumask.h> | 11 | #include <linux/cpumask.h> |
12 | #include <linux/pci-aspm.h> | 12 | #include <linux/pci-aspm.h> |
13 | #include <acpi/acpi_hest.h> | ||
14 | #include "pci.h" | 13 | #include "pci.h" |
15 | 14 | ||
16 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ | 15 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ |
@@ -89,6 +88,7 @@ static void release_pcibus_dev(struct device *dev) | |||
89 | 88 | ||
90 | if (pci_bus->bridge) | 89 | if (pci_bus->bridge) |
91 | put_device(pci_bus->bridge); | 90 | put_device(pci_bus->bridge); |
91 | pci_bus_remove_resources(pci_bus); | ||
92 | kfree(pci_bus); | 92 | kfree(pci_bus); |
93 | } | 93 | } |
94 | 94 | ||
@@ -163,9 +163,16 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
163 | struct resource *res, unsigned int pos) | 163 | struct resource *res, unsigned int pos) |
164 | { | 164 | { |
165 | u32 l, sz, mask; | 165 | u32 l, sz, mask; |
166 | u16 orig_cmd; | ||
166 | 167 | ||
167 | mask = type ? PCI_ROM_ADDRESS_MASK : ~0; | 168 | mask = type ? PCI_ROM_ADDRESS_MASK : ~0; |
168 | 169 | ||
170 | if (!dev->mmio_always_on) { | ||
171 | pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); | ||
172 | pci_write_config_word(dev, PCI_COMMAND, | ||
173 | orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)); | ||
174 | } | ||
175 | |||
169 | res->name = pci_name(dev); | 176 | res->name = pci_name(dev); |
170 | 177 | ||
171 | pci_read_config_dword(dev, pos, &l); | 178 | pci_read_config_dword(dev, pos, &l); |
@@ -173,6 +180,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
173 | pci_read_config_dword(dev, pos, &sz); | 180 | pci_read_config_dword(dev, pos, &sz); |
174 | pci_write_config_dword(dev, pos, l); | 181 | pci_write_config_dword(dev, pos, l); |
175 | 182 | ||
183 | if (!dev->mmio_always_on) | ||
184 | pci_write_config_word(dev, PCI_COMMAND, orig_cmd); | ||
185 | |||
176 | /* | 186 | /* |
177 | * All bits set in sz means the device isn't working properly. | 187 | * All bits set in sz means the device isn't working properly. |
178 | * If the BAR isn't implemented, all bits must be 0. If it's a | 188 | * If the BAR isn't implemented, all bits must be 0. If it's a |
@@ -281,26 +291,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
281 | } | 291 | } |
282 | } | 292 | } |
283 | 293 | ||
284 | void __devinit pci_read_bridge_bases(struct pci_bus *child) | 294 | static void __devinit pci_read_bridge_io(struct pci_bus *child) |
285 | { | 295 | { |
286 | struct pci_dev *dev = child->self; | 296 | struct pci_dev *dev = child->self; |
287 | u8 io_base_lo, io_limit_lo; | 297 | u8 io_base_lo, io_limit_lo; |
288 | u16 mem_base_lo, mem_limit_lo; | ||
289 | unsigned long base, limit; | 298 | unsigned long base, limit; |
290 | struct resource *res; | 299 | struct resource *res; |
291 | int i; | ||
292 | |||
293 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | ||
294 | return; | ||
295 | |||
296 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
297 | child->secondary, child->subordinate, | ||
298 | dev->transparent ? " (subtractive decode)": ""); | ||
299 | |||
300 | if (dev->transparent) { | ||
301 | for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) | ||
302 | child->resource[i] = child->parent->resource[i - 3]; | ||
303 | } | ||
304 | 300 | ||
305 | res = child->resource[0]; | 301 | res = child->resource[0]; |
306 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); | 302 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); |
@@ -316,26 +312,50 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
316 | limit |= (io_limit_hi << 16); | 312 | limit |= (io_limit_hi << 16); |
317 | } | 313 | } |
318 | 314 | ||
319 | if (base <= limit) { | 315 | if (base && base <= limit) { |
320 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; | 316 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; |
321 | if (!res->start) | 317 | if (!res->start) |
322 | res->start = base; | 318 | res->start = base; |
323 | if (!res->end) | 319 | if (!res->end) |
324 | res->end = limit + 0xfff; | 320 | res->end = limit + 0xfff; |
325 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 321 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
322 | } else { | ||
323 | dev_printk(KERN_DEBUG, &dev->dev, | ||
324 | " bridge window [io %#06lx-%#06lx] (disabled)\n", | ||
325 | base, limit); | ||
326 | } | 326 | } |
327 | } | ||
328 | |||
329 | static void __devinit pci_read_bridge_mmio(struct pci_bus *child) | ||
330 | { | ||
331 | struct pci_dev *dev = child->self; | ||
332 | u16 mem_base_lo, mem_limit_lo; | ||
333 | unsigned long base, limit; | ||
334 | struct resource *res; | ||
327 | 335 | ||
328 | res = child->resource[1]; | 336 | res = child->resource[1]; |
329 | pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); | 337 | pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); |
330 | pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); | 338 | pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); |
331 | base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; | 339 | base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; |
332 | limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; | 340 | limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; |
333 | if (base <= limit) { | 341 | if (base && base <= limit) { |
334 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 342 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
335 | res->start = base; | 343 | res->start = base; |
336 | res->end = limit + 0xfffff; | 344 | res->end = limit + 0xfffff; |
337 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 345 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
346 | } else { | ||
347 | dev_printk(KERN_DEBUG, &dev->dev, | ||
348 | " bridge window [mem %#010lx-%#010lx] (disabled)\n", | ||
349 | base, limit + 0xfffff); | ||
338 | } | 350 | } |
351 | } | ||
352 | |||
353 | static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child) | ||
354 | { | ||
355 | struct pci_dev *dev = child->self; | ||
356 | u16 mem_base_lo, mem_limit_lo; | ||
357 | unsigned long base, limit; | ||
358 | struct resource *res; | ||
339 | 359 | ||
340 | res = child->resource[2]; | 360 | res = child->resource[2]; |
341 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); | 361 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); |
@@ -366,7 +386,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
366 | #endif | 386 | #endif |
367 | } | 387 | } |
368 | } | 388 | } |
369 | if (base <= limit) { | 389 | if (base && base <= limit) { |
370 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | | 390 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | |
371 | IORESOURCE_MEM | IORESOURCE_PREFETCH; | 391 | IORESOURCE_MEM | IORESOURCE_PREFETCH; |
372 | if (res->flags & PCI_PREF_RANGE_TYPE_64) | 392 | if (res->flags & PCI_PREF_RANGE_TYPE_64) |
@@ -374,6 +394,44 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
374 | res->start = base; | 394 | res->start = base; |
375 | res->end = limit + 0xfffff; | 395 | res->end = limit + 0xfffff; |
376 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 396 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
397 | } else { | ||
398 | dev_printk(KERN_DEBUG, &dev->dev, | ||
399 | " bridge window [mem %#010lx-%#010lx pref] (disabled)\n", | ||
400 | base, limit + 0xfffff); | ||
401 | } | ||
402 | } | ||
403 | |||
404 | void __devinit pci_read_bridge_bases(struct pci_bus *child) | ||
405 | { | ||
406 | struct pci_dev *dev = child->self; | ||
407 | struct resource *res; | ||
408 | int i; | ||
409 | |||
410 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | ||
411 | return; | ||
412 | |||
413 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
414 | child->secondary, child->subordinate, | ||
415 | dev->transparent ? " (subtractive decode)" : ""); | ||
416 | |||
417 | pci_bus_remove_resources(child); | ||
418 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) | ||
419 | child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; | ||
420 | |||
421 | pci_read_bridge_io(child); | ||
422 | pci_read_bridge_mmio(child); | ||
423 | pci_read_bridge_mmio_pref(child); | ||
424 | |||
425 | if (dev->transparent) { | ||
426 | pci_bus_for_each_resource(child->parent, res, i) { | ||
427 | if (res) { | ||
428 | pci_bus_add_resource(child, res, | ||
429 | PCI_SUBTRACTIVE_DECODE); | ||
430 | dev_printk(KERN_DEBUG, &dev->dev, | ||
431 | " bridge window %pR (subtractive decode)\n", | ||
432 | res); | ||
433 | } | ||
434 | } | ||
377 | } | 435 | } |
378 | } | 436 | } |
379 | 437 | ||
@@ -387,10 +445,147 @@ static struct pci_bus * pci_alloc_bus(void) | |||
387 | INIT_LIST_HEAD(&b->children); | 445 | INIT_LIST_HEAD(&b->children); |
388 | INIT_LIST_HEAD(&b->devices); | 446 | INIT_LIST_HEAD(&b->devices); |
389 | INIT_LIST_HEAD(&b->slots); | 447 | INIT_LIST_HEAD(&b->slots); |
448 | INIT_LIST_HEAD(&b->resources); | ||
449 | b->max_bus_speed = PCI_SPEED_UNKNOWN; | ||
450 | b->cur_bus_speed = PCI_SPEED_UNKNOWN; | ||
390 | } | 451 | } |
391 | return b; | 452 | return b; |
392 | } | 453 | } |
393 | 454 | ||
455 | static unsigned char pcix_bus_speed[] = { | ||
456 | PCI_SPEED_UNKNOWN, /* 0 */ | ||
457 | PCI_SPEED_66MHz_PCIX, /* 1 */ | ||
458 | PCI_SPEED_100MHz_PCIX, /* 2 */ | ||
459 | PCI_SPEED_133MHz_PCIX, /* 3 */ | ||
460 | PCI_SPEED_UNKNOWN, /* 4 */ | ||
461 | PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ | ||
462 | PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ | ||
463 | PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ | ||
464 | PCI_SPEED_UNKNOWN, /* 8 */ | ||
465 | PCI_SPEED_66MHz_PCIX_266, /* 9 */ | ||
466 | PCI_SPEED_100MHz_PCIX_266, /* A */ | ||
467 | PCI_SPEED_133MHz_PCIX_266, /* B */ | ||
468 | PCI_SPEED_UNKNOWN, /* C */ | ||
469 | PCI_SPEED_66MHz_PCIX_533, /* D */ | ||
470 | PCI_SPEED_100MHz_PCIX_533, /* E */ | ||
471 | PCI_SPEED_133MHz_PCIX_533 /* F */ | ||
472 | }; | ||
473 | |||
474 | static unsigned char pcie_link_speed[] = { | ||
475 | PCI_SPEED_UNKNOWN, /* 0 */ | ||
476 | PCIE_SPEED_2_5GT, /* 1 */ | ||
477 | PCIE_SPEED_5_0GT, /* 2 */ | ||
478 | PCIE_SPEED_8_0GT, /* 3 */ | ||
479 | PCI_SPEED_UNKNOWN, /* 4 */ | ||
480 | PCI_SPEED_UNKNOWN, /* 5 */ | ||
481 | PCI_SPEED_UNKNOWN, /* 6 */ | ||
482 | PCI_SPEED_UNKNOWN, /* 7 */ | ||
483 | PCI_SPEED_UNKNOWN, /* 8 */ | ||
484 | PCI_SPEED_UNKNOWN, /* 9 */ | ||
485 | PCI_SPEED_UNKNOWN, /* A */ | ||
486 | PCI_SPEED_UNKNOWN, /* B */ | ||
487 | PCI_SPEED_UNKNOWN, /* C */ | ||
488 | PCI_SPEED_UNKNOWN, /* D */ | ||
489 | PCI_SPEED_UNKNOWN, /* E */ | ||
490 | PCI_SPEED_UNKNOWN /* F */ | ||
491 | }; | ||
492 | |||
493 | void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) | ||
494 | { | ||
495 | bus->cur_bus_speed = pcie_link_speed[linksta & 0xf]; | ||
496 | } | ||
497 | EXPORT_SYMBOL_GPL(pcie_update_link_speed); | ||
498 | |||
499 | static unsigned char agp_speeds[] = { | ||
500 | AGP_UNKNOWN, | ||
501 | AGP_1X, | ||
502 | AGP_2X, | ||
503 | AGP_4X, | ||
504 | AGP_8X | ||
505 | }; | ||
506 | |||
507 | static enum pci_bus_speed agp_speed(int agp3, int agpstat) | ||
508 | { | ||
509 | int index = 0; | ||
510 | |||
511 | if (agpstat & 4) | ||
512 | index = 3; | ||
513 | else if (agpstat & 2) | ||
514 | index = 2; | ||
515 | else if (agpstat & 1) | ||
516 | index = 1; | ||
517 | else | ||
518 | goto out; | ||
519 | |||
520 | if (agp3) { | ||
521 | index += 2; | ||
522 | if (index == 5) | ||
523 | index = 0; | ||
524 | } | ||
525 | |||
526 | out: | ||
527 | return agp_speeds[index]; | ||
528 | } | ||
529 | |||
530 | |||
531 | static void pci_set_bus_speed(struct pci_bus *bus) | ||
532 | { | ||
533 | struct pci_dev *bridge = bus->self; | ||
534 | int pos; | ||
535 | |||
536 | pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); | ||
537 | if (!pos) | ||
538 | pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); | ||
539 | if (pos) { | ||
540 | u32 agpstat, agpcmd; | ||
541 | |||
542 | pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); | ||
543 | bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); | ||
544 | |||
545 | pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); | ||
546 | bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); | ||
547 | } | ||
548 | |||
549 | pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); | ||
550 | if (pos) { | ||
551 | u16 status; | ||
552 | enum pci_bus_speed max; | ||
553 | pci_read_config_word(bridge, pos + 2, &status); | ||
554 | |||
555 | if (status & 0x8000) { | ||
556 | max = PCI_SPEED_133MHz_PCIX_533; | ||
557 | } else if (status & 0x4000) { | ||
558 | max = PCI_SPEED_133MHz_PCIX_266; | ||
559 | } else if (status & 0x0002) { | ||
560 | if (((status >> 12) & 0x3) == 2) { | ||
561 | max = PCI_SPEED_133MHz_PCIX_ECC; | ||
562 | } else { | ||
563 | max = PCI_SPEED_133MHz_PCIX; | ||
564 | } | ||
565 | } else { | ||
566 | max = PCI_SPEED_66MHz_PCIX; | ||
567 | } | ||
568 | |||
569 | bus->max_bus_speed = max; | ||
570 | bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf]; | ||
571 | |||
572 | return; | ||
573 | } | ||
574 | |||
575 | pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); | ||
576 | if (pos) { | ||
577 | u32 linkcap; | ||
578 | u16 linksta; | ||
579 | |||
580 | pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); | ||
581 | bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; | ||
582 | |||
583 | pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); | ||
584 | pcie_update_link_speed(bus, linksta); | ||
585 | } | ||
586 | } | ||
587 | |||
588 | |||
394 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | 589 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, |
395 | struct pci_dev *bridge, int busnr) | 590 | struct pci_dev *bridge, int busnr) |
396 | { | 591 | { |
@@ -430,6 +625,8 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | |||
430 | child->self = bridge; | 625 | child->self = bridge; |
431 | child->bridge = get_device(&bridge->dev); | 626 | child->bridge = get_device(&bridge->dev); |
432 | 627 | ||
628 | pci_set_bus_speed(child); | ||
629 | |||
433 | /* Set up default resource pointers and names.. */ | 630 | /* Set up default resource pointers and names.. */ |
434 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { | 631 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { |
435 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; | 632 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; |
@@ -485,16 +682,20 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
485 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); | 682 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); |
486 | u32 buses, i, j = 0; | 683 | u32 buses, i, j = 0; |
487 | u16 bctl; | 684 | u16 bctl; |
685 | u8 primary, secondary, subordinate; | ||
488 | int broken = 0; | 686 | int broken = 0; |
489 | 687 | ||
490 | pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); | 688 | pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); |
689 | primary = buses & 0xFF; | ||
690 | secondary = (buses >> 8) & 0xFF; | ||
691 | subordinate = (buses >> 16) & 0xFF; | ||
491 | 692 | ||
492 | dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", | 693 | dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", |
493 | buses & 0xffffff, pass); | 694 | secondary, subordinate, pass); |
494 | 695 | ||
495 | /* Check if setup is sensible at all */ | 696 | /* Check if setup is sensible at all */ |
496 | if (!pass && | 697 | if (!pass && |
497 | ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) { | 698 | (primary != bus->number || secondary <= bus->number)) { |
498 | dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); | 699 | dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); |
499 | broken = 1; | 700 | broken = 1; |
500 | } | 701 | } |
@@ -505,15 +706,15 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
505 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, | 706 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, |
506 | bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); | 707 | bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); |
507 | 708 | ||
508 | if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { | 709 | if ((secondary || subordinate) && !pcibios_assign_all_busses() && |
509 | unsigned int cmax, busnr; | 710 | !is_cardbus && !broken) { |
711 | unsigned int cmax; | ||
510 | /* | 712 | /* |
511 | * Bus already configured by firmware, process it in the first | 713 | * Bus already configured by firmware, process it in the first |
512 | * pass and just note the configuration. | 714 | * pass and just note the configuration. |
513 | */ | 715 | */ |
514 | if (pass) | 716 | if (pass) |
515 | goto out; | 717 | goto out; |
516 | busnr = (buses >> 8) & 0xFF; | ||
517 | 718 | ||
518 | /* | 719 | /* |
519 | * If we already got to this bus through a different bridge, | 720 | * If we already got to this bus through a different bridge, |
@@ -522,13 +723,13 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
522 | * However, we continue to descend down the hierarchy and | 723 | * However, we continue to descend down the hierarchy and |
523 | * scan remaining child buses. | 724 | * scan remaining child buses. |
524 | */ | 725 | */ |
525 | child = pci_find_bus(pci_domain_nr(bus), busnr); | 726 | child = pci_find_bus(pci_domain_nr(bus), secondary); |
526 | if (!child) { | 727 | if (!child) { |
527 | child = pci_add_new_bus(bus, dev, busnr); | 728 | child = pci_add_new_bus(bus, dev, secondary); |
528 | if (!child) | 729 | if (!child) |
529 | goto out; | 730 | goto out; |
530 | child->primary = buses & 0xFF; | 731 | child->primary = primary; |
531 | child->subordinate = (buses >> 16) & 0xFF; | 732 | child->subordinate = subordinate; |
532 | child->bridge_ctl = bctl; | 733 | child->bridge_ctl = bctl; |
533 | } | 734 | } |
534 | 735 | ||
@@ -681,7 +882,7 @@ static void pci_read_irq(struct pci_dev *dev) | |||
681 | dev->irq = irq; | 882 | dev->irq = irq; |
682 | } | 883 | } |
683 | 884 | ||
684 | static void set_pcie_port_type(struct pci_dev *pdev) | 885 | void set_pcie_port_type(struct pci_dev *pdev) |
685 | { | 886 | { |
686 | int pos; | 887 | int pos; |
687 | u16 reg16; | 888 | u16 reg16; |
@@ -695,7 +896,7 @@ static void set_pcie_port_type(struct pci_dev *pdev) | |||
695 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 896 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
696 | } | 897 | } |
697 | 898 | ||
698 | static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | 899 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
699 | { | 900 | { |
700 | int pos; | 901 | int pos; |
701 | u16 reg16; | 902 | u16 reg16; |
@@ -712,12 +913,6 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | |||
712 | pdev->is_hotplug_bridge = 1; | 913 | pdev->is_hotplug_bridge = 1; |
713 | } | 914 | } |
714 | 915 | ||
715 | static void set_pci_aer_firmware_first(struct pci_dev *pdev) | ||
716 | { | ||
717 | if (acpi_hest_firmware_first_pci(pdev)) | ||
718 | pdev->aer_firmware_first = 1; | ||
719 | } | ||
720 | |||
721 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 916 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
722 | 917 | ||
723 | /** | 918 | /** |
@@ -747,7 +942,6 @@ int pci_setup_device(struct pci_dev *dev) | |||
747 | dev->multifunction = !!(hdr_type & 0x80); | 942 | dev->multifunction = !!(hdr_type & 0x80); |
748 | dev->error_state = pci_channel_io_normal; | 943 | dev->error_state = pci_channel_io_normal; |
749 | set_pcie_port_type(dev); | 944 | set_pcie_port_type(dev); |
750 | set_pci_aer_firmware_first(dev); | ||
751 | 945 | ||
752 | list_for_each_entry(slot, &dev->bus->slots, list) | 946 | list_for_each_entry(slot, &dev->bus->slots, list) |
753 | if (PCI_SLOT(dev->devfn) == slot->number) | 947 | if (PCI_SLOT(dev->devfn) == slot->number) |
@@ -767,8 +961,8 @@ int pci_setup_device(struct pci_dev *dev) | |||
767 | dev->class = class; | 961 | dev->class = class; |
768 | class >>= 8; | 962 | class >>= 8; |
769 | 963 | ||
770 | dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", | 964 | dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n", |
771 | dev->vendor, dev->device, class, dev->hdr_type); | 965 | dev->vendor, dev->device, dev->hdr_type, class); |
772 | 966 | ||
773 | /* need to have dev->class ready */ | 967 | /* need to have dev->class ready */ |
774 | dev->cfg_size = pci_cfg_space_size(dev); | 968 | dev->cfg_size = pci_cfg_space_size(dev); |
@@ -1081,6 +1275,45 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) | |||
1081 | } | 1275 | } |
1082 | EXPORT_SYMBOL(pci_scan_single_device); | 1276 | EXPORT_SYMBOL(pci_scan_single_device); |
1083 | 1277 | ||
1278 | static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) | ||
1279 | { | ||
1280 | u16 cap; | ||
1281 | unsigned pos, next_fn; | ||
1282 | |||
1283 | if (!dev) | ||
1284 | return 0; | ||
1285 | |||
1286 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); | ||
1287 | if (!pos) | ||
1288 | return 0; | ||
1289 | pci_read_config_word(dev, pos + 4, &cap); | ||
1290 | next_fn = cap >> 8; | ||
1291 | if (next_fn <= fn) | ||
1292 | return 0; | ||
1293 | return next_fn; | ||
1294 | } | ||
1295 | |||
1296 | static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn) | ||
1297 | { | ||
1298 | return (fn + 1) % 8; | ||
1299 | } | ||
1300 | |||
1301 | static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) | ||
1302 | { | ||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | static int only_one_child(struct pci_bus *bus) | ||
1307 | { | ||
1308 | struct pci_dev *parent = bus->self; | ||
1309 | if (!parent || !pci_is_pcie(parent)) | ||
1310 | return 0; | ||
1311 | if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT || | ||
1312 | parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) | ||
1313 | return 1; | ||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1084 | /** | 1317 | /** |
1085 | * pci_scan_slot - scan a PCI slot on a bus for devices. | 1318 | * pci_scan_slot - scan a PCI slot on a bus for devices. |
1086 | * @bus: PCI bus to scan | 1319 | * @bus: PCI bus to scan |
@@ -1094,21 +1327,30 @@ EXPORT_SYMBOL(pci_scan_single_device); | |||
1094 | */ | 1327 | */ |
1095 | int pci_scan_slot(struct pci_bus *bus, int devfn) | 1328 | int pci_scan_slot(struct pci_bus *bus, int devfn) |
1096 | { | 1329 | { |
1097 | int fn, nr = 0; | 1330 | unsigned fn, nr = 0; |
1098 | struct pci_dev *dev; | 1331 | struct pci_dev *dev; |
1332 | unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; | ||
1333 | |||
1334 | if (only_one_child(bus) && (devfn > 0)) | ||
1335 | return 0; /* Already scanned the entire slot */ | ||
1099 | 1336 | ||
1100 | dev = pci_scan_single_device(bus, devfn); | 1337 | dev = pci_scan_single_device(bus, devfn); |
1101 | if (dev && !dev->is_added) /* new device? */ | 1338 | if (!dev) |
1339 | return 0; | ||
1340 | if (!dev->is_added) | ||
1102 | nr++; | 1341 | nr++; |
1103 | 1342 | ||
1104 | if (dev && dev->multifunction) { | 1343 | if (pci_ari_enabled(bus)) |
1105 | for (fn = 1; fn < 8; fn++) { | 1344 | next_fn = next_ari_fn; |
1106 | dev = pci_scan_single_device(bus, devfn + fn); | 1345 | else if (dev->multifunction) |
1107 | if (dev) { | 1346 | next_fn = next_trad_fn; |
1108 | if (!dev->is_added) | 1347 | |
1109 | nr++; | 1348 | for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { |
1110 | dev->multifunction = 1; | 1349 | dev = pci_scan_single_device(bus, devfn + fn); |
1111 | } | 1350 | if (dev) { |
1351 | if (!dev->is_added) | ||
1352 | nr++; | ||
1353 | dev->multifunction = 1; | ||
1112 | } | 1354 | } |
1113 | } | 1355 | } |
1114 | 1356 | ||
@@ -1200,6 +1442,7 @@ struct pci_bus * pci_create_bus(struct device *parent, | |||
1200 | if (error) | 1442 | if (error) |
1201 | goto dev_reg_err; | 1443 | goto dev_reg_err; |
1202 | b->bridge = get_device(dev); | 1444 | b->bridge = get_device(dev); |
1445 | device_enable_async_suspend(b->bridge); | ||
1203 | 1446 | ||
1204 | if (!parent) | 1447 | if (!parent) |
1205 | set_dev_node(b->bridge, pcibus_to_node(b)); | 1448 | set_dev_node(b->bridge, pcibus_to_node(b)); |
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 593bb844b8db..27911b55c2a5 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -6,10 +6,10 @@ | |||
6 | 6 | ||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/pci.h> | 8 | #include <linux/pci.h> |
9 | #include <linux/slab.h> | ||
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
11 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
@@ -211,8 +211,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
211 | #endif /* HAVE_PCI_MMAP */ | 211 | #endif /* HAVE_PCI_MMAP */ |
212 | int ret = 0; | 212 | int ret = 0; |
213 | 213 | ||
214 | lock_kernel(); | ||
215 | |||
216 | switch (cmd) { | 214 | switch (cmd) { |
217 | case PCIIOC_CONTROLLER: | 215 | case PCIIOC_CONTROLLER: |
218 | ret = pci_domain_nr(dev->bus); | 216 | ret = pci_domain_nr(dev->bus); |
@@ -241,7 +239,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
241 | break; | 239 | break; |
242 | }; | 240 | }; |
243 | 241 | ||
244 | unlock_kernel(); | ||
245 | return ret; | 242 | return ret; |
246 | } | 243 | } |
247 | 244 | ||
@@ -259,7 +256,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) | |||
259 | 256 | ||
260 | /* Make sure the caller is mapping a real resource for this device */ | 257 | /* Make sure the caller is mapping a real resource for this device */ |
261 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 258 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
262 | if (pci_mmap_fits(dev, i, vma)) | 259 | if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) |
263 | break; | 260 | break; |
264 | } | 261 | } |
265 | 262 | ||
@@ -305,6 +302,7 @@ static const struct file_operations proc_bus_pci_operations = { | |||
305 | .read = proc_bus_pci_read, | 302 | .read = proc_bus_pci_read, |
306 | .write = proc_bus_pci_write, | 303 | .write = proc_bus_pci_write, |
307 | .unlocked_ioctl = proc_bus_pci_ioctl, | 304 | .unlocked_ioctl = proc_bus_pci_ioctl, |
305 | .compat_ioctl = proc_bus_pci_ioctl, | ||
308 | #ifdef HAVE_PCI_MMAP | 306 | #ifdef HAVE_PCI_MMAP |
309 | .open = proc_bus_pci_open, | 307 | .open = proc_bus_pci_open, |
310 | .release = proc_bus_pci_release, | 308 | .release = proc_bus_pci_release, |
@@ -430,8 +428,6 @@ int pci_proc_detach_device(struct pci_dev *dev) | |||
430 | struct proc_dir_entry *e; | 428 | struct proc_dir_entry *e; |
431 | 429 | ||
432 | if ((e = dev->procent)) { | 430 | if ((e = dev->procent)) { |
433 | if (atomic_read(&e->count) > 1) | ||
434 | return -EBUSY; | ||
435 | remove_proc_entry(e->name, dev->bus->procdir); | 431 | remove_proc_entry(e->name, dev->bus->procdir); |
436 | dev->procent = NULL; | 432 | dev->procent = NULL; |
437 | } | 433 | } |
@@ -484,9 +480,9 @@ static int __init pci_proc_init(void) | |||
484 | proc_create("devices", 0, proc_bus_pci_dir, | 480 | proc_create("devices", 0, proc_bus_pci_dir, |
485 | &proc_bus_pci_dev_operations); | 481 | &proc_bus_pci_dev_operations); |
486 | proc_initialized = 1; | 482 | proc_initialized = 1; |
487 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 483 | for_each_pci_dev(dev) |
488 | pci_proc_attach_device(dev); | 484 | pci_proc_attach_device(dev); |
489 | } | 485 | |
490 | return 0; | 486 | return 0; |
491 | } | 487 | } |
492 | 488 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c74694345b6e..53a786fd0d40 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -25,14 +25,9 @@ | |||
25 | #include <linux/dmi.h> | 25 | #include <linux/dmi.h> |
26 | #include <linux/pci-aspm.h> | 26 | #include <linux/pci-aspm.h> |
27 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
28 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
28 | #include "pci.h" | 29 | #include "pci.h" |
29 | 30 | ||
30 | int isa_dma_bridge_buggy; | ||
31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
32 | int pci_pci_problems; | ||
33 | EXPORT_SYMBOL(pci_pci_problems); | ||
34 | |||
35 | #ifdef CONFIG_PCI_QUIRKS | ||
36 | /* | 31 | /* |
37 | * This quirk function disables memory decoding and releases memory resources | 32 | * This quirk function disables memory decoding and releases memory resources |
38 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. | 33 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. |
@@ -96,6 +91,19 @@ static void __devinit quirk_resource_alignment(struct pci_dev *dev) | |||
96 | } | 91 | } |
97 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment); | 92 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment); |
98 | 93 | ||
94 | /* | ||
95 | * Decoding should be disabled for a PCI device during BAR sizing to avoid | ||
96 | * conflict. But doing so may cause problems on host bridge and perhaps other | ||
97 | * key system devices. For devices that need to have mmio decoding always-on, | ||
98 | * we need to set the dev->mmio_always_on bit. | ||
99 | */ | ||
100 | static void __devinit quirk_mmio_always_on(struct pci_dev *dev) | ||
101 | { | ||
102 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) | ||
103 | dev->mmio_always_on = 1; | ||
104 | } | ||
105 | DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_mmio_always_on); | ||
106 | |||
99 | /* The Mellanox Tavor device gives false positive parity errors | 107 | /* The Mellanox Tavor device gives false positive parity errors |
100 | * Mark this device with a broken_parity_status, to allow | 108 | * Mark this device with a broken_parity_status, to allow |
101 | * PCI scanning code to "skip" this now blacklisted device. | 109 | * PCI scanning code to "skip" this now blacklisted device. |
@@ -155,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d | |||
155 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); | 163 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); |
156 | 164 | ||
157 | /* | 165 | /* |
166 | * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear | ||
167 | * for some HT machines to use C4 w/o hanging. | ||
168 | */ | ||
169 | static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) | ||
170 | { | ||
171 | u32 pmbase; | ||
172 | u16 pm1a; | ||
173 | |||
174 | pci_read_config_dword(dev, 0x40, &pmbase); | ||
175 | pmbase = pmbase & 0xff80; | ||
176 | pm1a = inw(pmbase); | ||
177 | |||
178 | if (pm1a & 0x10) { | ||
179 | dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); | ||
180 | outw(0x10, pmbase); | ||
181 | } | ||
182 | } | ||
183 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); | ||
184 | |||
185 | /* | ||
158 | * Chipsets where PCI->PCI transfers vanish or hang | 186 | * Chipsets where PCI->PCI transfers vanish or hang |
159 | */ | 187 | */ |
160 | static void __devinit quirk_nopcipci(struct pci_dev *dev) | 188 | static void __devinit quirk_nopcipci(struct pci_dev *dev) |
@@ -198,6 +226,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir | |||
198 | * VIA Apollo KT133 needs PCI latency patch | 226 | * VIA Apollo KT133 needs PCI latency patch |
199 | * Made according to a windows driver based patch by George E. Breese | 227 | * Made according to a windows driver based patch by George E. Breese |
200 | * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm | 228 | * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm |
229 | * and http://www.georgebreese.com/net/software/#PCI | ||
201 | * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for | 230 | * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for |
202 | * the info on which Mr Breese based his work. | 231 | * the info on which Mr Breese based his work. |
203 | * | 232 | * |
@@ -338,6 +367,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) | |||
338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); | 367 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); |
339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); | 368 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); |
340 | 369 | ||
370 | /* | ||
371 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS | ||
372 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. | ||
373 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k | ||
374 | * (which conflicts w/ BAR1's memory range). | ||
375 | */ | ||
376 | static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) | ||
377 | { | ||
378 | if (pci_resource_len(dev, 0) != 8) { | ||
379 | struct resource *res = &dev->resource[0]; | ||
380 | res->end = res->start + 8 - 1; | ||
381 | dev_info(&dev->dev, "CS5536 ISA bridge bug detected " | ||
382 | "(incorrect header); workaround applied.\n"); | ||
383 | } | ||
384 | } | ||
385 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); | ||
386 | |||
341 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | 387 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, |
342 | unsigned size, int nr, const char *name) | 388 | unsigned size, int nr, const char *name) |
343 | { | 389 | { |
@@ -356,8 +402,9 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | |||
356 | bus_region.end = res->end; | 402 | bus_region.end = res->end; |
357 | pcibios_bus_to_resource(dev, res, &bus_region); | 403 | pcibios_bus_to_resource(dev, res, &bus_region); |
358 | 404 | ||
359 | pci_claim_resource(dev, nr); | 405 | if (pci_claim_resource(dev, nr) == 0) |
360 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); | 406 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", |
407 | res, name); | ||
361 | } | 408 | } |
362 | } | 409 | } |
363 | 410 | ||
@@ -970,7 +1017,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge) | |||
970 | /* | 1017 | /* |
971 | * Common misconfiguration of the MediaGX/Geode PCI master that will | 1018 | * Common misconfiguration of the MediaGX/Geode PCI master that will |
972 | * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 | 1019 | * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 |
973 | * datasheets found at http://www.national.com/ds/GX for info on what | 1020 | * datasheets found at http://www.national.com/analog for info on what |
974 | * these bits do. <christer@weinigel.se> | 1021 | * these bits do. <christer@weinigel.se> |
975 | */ | 1022 | */ |
976 | static void quirk_mediagx_master(struct pci_dev *dev) | 1023 | static void quirk_mediagx_master(struct pci_dev *dev) |
@@ -1444,7 +1491,9 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) | |||
1444 | conf5 &= ~(1 << 24); /* Clear bit 24 */ | 1491 | conf5 &= ~(1 << 24); /* Clear bit 24 */ |
1445 | 1492 | ||
1446 | switch (pdev->device) { | 1493 | switch (pdev->device) { |
1447 | case PCI_DEVICE_ID_JMICRON_JMB360: | 1494 | case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ |
1495 | case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ | ||
1496 | case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */ | ||
1448 | /* The controller should be in single function ahci mode */ | 1497 | /* The controller should be in single function ahci mode */ |
1449 | conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ | 1498 | conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ |
1450 | break; | 1499 | break; |
@@ -1456,6 +1505,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) | |||
1456 | /* Fall through */ | 1505 | /* Fall through */ |
1457 | case PCI_DEVICE_ID_JMICRON_JMB361: | 1506 | case PCI_DEVICE_ID_JMICRON_JMB361: |
1458 | case PCI_DEVICE_ID_JMICRON_JMB363: | 1507 | case PCI_DEVICE_ID_JMICRON_JMB363: |
1508 | case PCI_DEVICE_ID_JMICRON_JMB369: | ||
1459 | /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ | 1509 | /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ |
1460 | /* Set the class codes correctly and then direct IDE 0 */ | 1510 | /* Set the class codes correctly and then direct IDE 0 */ |
1461 | conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */ | 1511 | conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */ |
@@ -1480,16 +1530,22 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) | |||
1480 | } | 1530 | } |
1481 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); | 1531 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); |
1482 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); | 1532 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); |
1533 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); | ||
1483 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); | 1534 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); |
1535 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata); | ||
1484 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); | 1536 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); |
1485 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); | 1537 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); |
1486 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); | 1538 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); |
1539 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata); | ||
1487 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); | 1540 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); |
1488 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); | 1541 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); |
1542 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); | ||
1489 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); | 1543 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); |
1544 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata); | ||
1490 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); | 1545 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); |
1491 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); | 1546 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); |
1492 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); | 1547 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); |
1548 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata); | ||
1493 | 1549 | ||
1494 | #endif | 1550 | #endif |
1495 | 1551 | ||
@@ -1965,11 +2021,25 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1965 | /* | 2021 | /* |
1966 | * Disable PCI Bus Parking and PCI Master read caching on CX700 | 2022 | * Disable PCI Bus Parking and PCI Master read caching on CX700 |
1967 | * which causes unspecified timing errors with a VT6212L on the PCI | 2023 | * which causes unspecified timing errors with a VT6212L on the PCI |
1968 | * bus leading to USB2.0 packet loss. The defaults are that these | 2024 | * bus leading to USB2.0 packet loss. |
1969 | * features are turned off but some BIOSes turn them on. | 2025 | * |
2026 | * This quirk is only enabled if a second (on the external PCI bus) | ||
2027 | * VT6212L is found -- the CX700 core itself also contains a USB | ||
2028 | * host controller with the same PCI ID as the VT6212L. | ||
1970 | */ | 2029 | */ |
1971 | 2030 | ||
2031 | /* Count VT6212L instances */ | ||
2032 | struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA, | ||
2033 | PCI_DEVICE_ID_VIA_8235_USB_2, NULL); | ||
1972 | uint8_t b; | 2034 | uint8_t b; |
2035 | |||
2036 | /* p should contain the first (internal) VT6212L -- see if we have | ||
2037 | an external one by searching again */ | ||
2038 | p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p); | ||
2039 | if (!p) | ||
2040 | return; | ||
2041 | pci_dev_put(p); | ||
2042 | |||
1973 | if (pci_read_config_byte(dev, 0x76, &b) == 0) { | 2043 | if (pci_read_config_byte(dev, 0x76, &b) == 0) { |
1974 | if (b & 0x40) { | 2044 | if (b & 0x40) { |
1975 | /* Turn off PCI Bus Parking */ | 2045 | /* Turn off PCI Bus Parking */ |
@@ -1996,7 +2066,7 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1996 | } | 2066 | } |
1997 | } | 2067 | } |
1998 | } | 2068 | } |
1999 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); | 2069 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); |
2000 | 2070 | ||
2001 | /* | 2071 | /* |
2002 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the | 2072 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the |
@@ -2066,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, | |||
2066 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, | 2136 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, |
2067 | quirk_unhide_mch_dev6); | 2137 | quirk_unhide_mch_dev6); |
2068 | 2138 | ||
2139 | #ifdef CONFIG_TILE | ||
2140 | /* | ||
2141 | * The Tilera TILEmpower platform needs to set the link speed | ||
2142 | * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed | ||
2143 | * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe | ||
2144 | * capability register of the PEX8624 PCIe switch. The switch | ||
2145 | * supports link speed auto negotiation, but falsely sets | ||
2146 | * the link speed to 5GT/s. | ||
2147 | */ | ||
2148 | static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev) | ||
2149 | { | ||
2150 | if (tile_plx_gen1) { | ||
2151 | pci_write_config_dword(dev, 0x98, 0x1); | ||
2152 | mdelay(50); | ||
2153 | } | ||
2154 | } | ||
2155 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); | ||
2156 | #endif /* CONFIG_TILE */ | ||
2069 | 2157 | ||
2070 | #ifdef CONFIG_PCI_MSI | 2158 | #ifdef CONFIG_PCI_MSI |
2071 | /* Some chipsets do not support MSI. We cannot easily rely on setting | 2159 | /* Some chipsets do not support MSI. We cannot easily rely on setting |
@@ -2085,6 +2173,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disabl | |||
2085 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); | 2173 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); |
2086 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); | 2174 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); |
2087 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); | 2175 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); |
2176 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi); | ||
2088 | 2177 | ||
2089 | /* Disable MSI on chipsets that are known to not support it */ | 2178 | /* Disable MSI on chipsets that are known to not support it */ |
2090 | static void __devinit quirk_disable_msi(struct pci_dev *dev) | 2179 | static void __devinit quirk_disable_msi(struct pci_dev *dev) |
@@ -2096,6 +2185,28 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev) | |||
2096 | } | 2185 | } |
2097 | } | 2186 | } |
2098 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); | 2187 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); |
2188 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); | ||
2189 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); | ||
2190 | |||
2191 | /* | ||
2192 | * The APC bridge device in AMD 780 family northbridges has some random | ||
2193 | * OEM subsystem ID in its vendor ID register (erratum 18), so instead | ||
2194 | * we use the possible vendor/device IDs of the host bridge for the | ||
2195 | * declared quirk, and search for the APC bridge by slot number. | ||
2196 | */ | ||
2197 | static void __devinit quirk_amd_780_apc_msi(struct pci_dev *host_bridge) | ||
2198 | { | ||
2199 | struct pci_dev *apc_bridge; | ||
2200 | |||
2201 | apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0)); | ||
2202 | if (apc_bridge) { | ||
2203 | if (apc_bridge->device == 0x9602) | ||
2204 | quirk_disable_msi(apc_bridge); | ||
2205 | pci_dev_put(apc_bridge); | ||
2206 | } | ||
2207 | } | ||
2208 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi); | ||
2209 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi); | ||
2099 | 2210 | ||
2100 | /* Go through the list of Hypertransport capabilities and | 2211 | /* Go through the list of Hypertransport capabilities and |
2101 | * return 1 if a HT MSI capability is found and enabled */ | 2212 | * return 1 if a HT MSI capability is found and enabled */ |
@@ -2187,15 +2298,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | |||
2187 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, | 2298 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, |
2188 | ht_enable_msi_mapping); | 2299 | ht_enable_msi_mapping); |
2189 | 2300 | ||
2190 | /* The P5N32-SLI Premium motherboard from Asus has a problem with msi | 2301 | /* The P5N32-SLI motherboards from Asus have a problem with msi |
2191 | * for the MCP55 NIC. It is not yet determined whether the msi problem | 2302 | * for the MCP55 NIC. It is not yet determined whether the msi problem |
2192 | * also affects other devices. As for now, turn off msi for this device. | 2303 | * also affects other devices. As for now, turn off msi for this device. |
2193 | */ | 2304 | */ |
2194 | static void __devinit nvenet_msi_disable(struct pci_dev *dev) | 2305 | static void __devinit nvenet_msi_disable(struct pci_dev *dev) |
2195 | { | 2306 | { |
2196 | if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) { | 2307 | if (dmi_name_in_vendors("P5N32-SLI PREMIUM") || |
2308 | dmi_name_in_vendors("P5N32-E SLI")) { | ||
2197 | dev_info(&dev->dev, | 2309 | dev_info(&dev->dev, |
2198 | "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n"); | 2310 | "Disabling msi for MCP55 NIC on P5N32-SLI\n"); |
2199 | dev->no_msi = 1; | 2311 | dev->no_msi = 1; |
2200 | } | 2312 | } |
2201 | } | 2313 | } |
@@ -2203,6 +2315,40 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, | |||
2203 | PCI_DEVICE_ID_NVIDIA_NVENET_15, | 2315 | PCI_DEVICE_ID_NVIDIA_NVENET_15, |
2204 | nvenet_msi_disable); | 2316 | nvenet_msi_disable); |
2205 | 2317 | ||
2318 | /* | ||
2319 | * Some versions of the MCP55 bridge from nvidia have a legacy irq routing | ||
2320 | * config register. This register controls the routing of legacy interrupts | ||
2321 | * from devices that route through the MCP55. If this register is misprogramed | ||
2322 | * interrupts are only sent to the bsp, unlike conventional systems where the | ||
2323 | * irq is broadxast to all online cpus. Not having this register set | ||
2324 | * properly prevents kdump from booting up properly, so lets make sure that | ||
2325 | * we have it set correctly. | ||
2326 | * Note this is an undocumented register. | ||
2327 | */ | ||
2328 | static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev) | ||
2329 | { | ||
2330 | u32 cfg; | ||
2331 | |||
2332 | if (!pci_find_capability(dev, PCI_CAP_ID_HT)) | ||
2333 | return; | ||
2334 | |||
2335 | pci_read_config_dword(dev, 0x74, &cfg); | ||
2336 | |||
2337 | if (cfg & ((1 << 2) | (1 << 15))) { | ||
2338 | printk(KERN_INFO "Rewriting irq routing register on MCP55\n"); | ||
2339 | cfg &= ~((1 << 2) | (1 << 15)); | ||
2340 | pci_write_config_dword(dev, 0x74, cfg); | ||
2341 | } | ||
2342 | } | ||
2343 | |||
2344 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, | ||
2345 | PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0, | ||
2346 | nvbridge_check_legacy_irq_routing); | ||
2347 | |||
2348 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, | ||
2349 | PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4, | ||
2350 | nvbridge_check_legacy_irq_routing); | ||
2351 | |||
2206 | static int __devinit ht_check_msi_mapping(struct pci_dev *dev) | 2352 | static int __devinit ht_check_msi_mapping(struct pci_dev *dev) |
2207 | { | 2353 | { |
2208 | int pos, ttl = 48; | 2354 | int pos, ttl = 48; |
@@ -2354,6 +2500,9 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all) | |||
2354 | int pos; | 2500 | int pos; |
2355 | int found; | 2501 | int found; |
2356 | 2502 | ||
2503 | if (!pci_msi_enabled()) | ||
2504 | return; | ||
2505 | |||
2357 | /* check if there is HT MSI cap or enabled on this device */ | 2506 | /* check if there is HT MSI cap or enabled on this device */ |
2358 | found = ht_check_msi_mapping(dev); | 2507 | found = ht_check_msi_mapping(dev); |
2359 | 2508 | ||
@@ -2517,9 +2666,131 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); | |||
2517 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); | 2666 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); |
2518 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); | 2667 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); |
2519 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); | 2668 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); |
2669 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov); | ||
2520 | 2670 | ||
2521 | #endif /* CONFIG_PCI_IOV */ | 2671 | #endif /* CONFIG_PCI_IOV */ |
2522 | 2672 | ||
2673 | /* Allow manual resource allocation for PCI hotplug bridges | ||
2674 | * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For | ||
2675 | * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), | ||
2676 | * kernel fails to allocate resources when hotplug device is | ||
2677 | * inserted and PCI bus is rescanned. | ||
2678 | */ | ||
2679 | static void __devinit quirk_hotplug_bridge(struct pci_dev *dev) | ||
2680 | { | ||
2681 | dev->is_hotplug_bridge = 1; | ||
2682 | } | ||
2683 | |||
2684 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge); | ||
2685 | |||
2686 | /* | ||
2687 | * This is a quirk for the Ricoh MMC controller found as a part of | ||
2688 | * some mulifunction chips. | ||
2689 | |||
2690 | * This is very similiar and based on the ricoh_mmc driver written by | ||
2691 | * Philip Langdale. Thank you for these magic sequences. | ||
2692 | * | ||
2693 | * These chips implement the four main memory card controllers (SD, MMC, MS, xD) | ||
2694 | * and one or both of cardbus or firewire. | ||
2695 | * | ||
2696 | * It happens that they implement SD and MMC | ||
2697 | * support as separate controllers (and PCI functions). The linux SDHCI | ||
2698 | * driver supports MMC cards but the chip detects MMC cards in hardware | ||
2699 | * and directs them to the MMC controller - so the SDHCI driver never sees | ||
2700 | * them. | ||
2701 | * | ||
2702 | * To get around this, we must disable the useless MMC controller. | ||
2703 | * At that point, the SDHCI controller will start seeing them | ||
2704 | * It seems to be the case that the relevant PCI registers to deactivate the | ||
2705 | * MMC controller live on PCI function 0, which might be the cardbus controller | ||
2706 | * or the firewire controller, depending on the particular chip in question | ||
2707 | * | ||
2708 | * This has to be done early, because as soon as we disable the MMC controller | ||
2709 | * other pci functions shift up one level, e.g. function #2 becomes function | ||
2710 | * #1, and this will confuse the pci core. | ||
2711 | */ | ||
2712 | |||
2713 | #ifdef CONFIG_MMC_RICOH_MMC | ||
2714 | static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev) | ||
2715 | { | ||
2716 | /* disable via cardbus interface */ | ||
2717 | u8 write_enable; | ||
2718 | u8 write_target; | ||
2719 | u8 disable; | ||
2720 | |||
2721 | /* disable must be done via function #0 */ | ||
2722 | if (PCI_FUNC(dev->devfn)) | ||
2723 | return; | ||
2724 | |||
2725 | pci_read_config_byte(dev, 0xB7, &disable); | ||
2726 | if (disable & 0x02) | ||
2727 | return; | ||
2728 | |||
2729 | pci_read_config_byte(dev, 0x8E, &write_enable); | ||
2730 | pci_write_config_byte(dev, 0x8E, 0xAA); | ||
2731 | pci_read_config_byte(dev, 0x8D, &write_target); | ||
2732 | pci_write_config_byte(dev, 0x8D, 0xB7); | ||
2733 | pci_write_config_byte(dev, 0xB7, disable | 0x02); | ||
2734 | pci_write_config_byte(dev, 0x8E, write_enable); | ||
2735 | pci_write_config_byte(dev, 0x8D, write_target); | ||
2736 | |||
2737 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n"); | ||
2738 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | ||
2739 | } | ||
2740 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); | ||
2741 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); | ||
2742 | |||
2743 | static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) | ||
2744 | { | ||
2745 | /* disable via firewire interface */ | ||
2746 | u8 write_enable; | ||
2747 | u8 disable; | ||
2748 | |||
2749 | /* disable must be done via function #0 */ | ||
2750 | if (PCI_FUNC(dev->devfn)) | ||
2751 | return; | ||
2752 | |||
2753 | pci_read_config_byte(dev, 0xCB, &disable); | ||
2754 | |||
2755 | if (disable & 0x02) | ||
2756 | return; | ||
2757 | |||
2758 | pci_read_config_byte(dev, 0xCA, &write_enable); | ||
2759 | pci_write_config_byte(dev, 0xCA, 0x57); | ||
2760 | pci_write_config_byte(dev, 0xCB, disable | 0x02); | ||
2761 | pci_write_config_byte(dev, 0xCA, write_enable); | ||
2762 | |||
2763 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); | ||
2764 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | ||
2765 | } | ||
2766 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | ||
2767 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | ||
2768 | #endif /*CONFIG_MMC_RICOH_MMC*/ | ||
2769 | |||
2770 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | ||
2771 | #define VTUNCERRMSK_REG 0x1ac | ||
2772 | #define VTD_MSK_SPEC_ERRORS (1 << 31) | ||
2773 | /* | ||
2774 | * This is a quirk for masking vt-d spec defined errors to platform error | ||
2775 | * handling logic. With out this, platforms using Intel 7500, 5500 chipsets | ||
2776 | * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based | ||
2777 | * on the RAS config settings of the platform) when a vt-d fault happens. | ||
2778 | * The resulting SMI caused the system to hang. | ||
2779 | * | ||
2780 | * VT-d spec related errors are already handled by the VT-d OS code, so no | ||
2781 | * need to report the same error through other channels. | ||
2782 | */ | ||
2783 | static void vtd_mask_spec_errors(struct pci_dev *dev) | ||
2784 | { | ||
2785 | u32 word; | ||
2786 | |||
2787 | pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); | ||
2788 | pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); | ||
2789 | } | ||
2790 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); | ||
2791 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); | ||
2792 | #endif | ||
2793 | |||
2523 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, | 2794 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, |
2524 | struct pci_fixup *end) | 2795 | struct pci_fixup *end) |
2525 | { | 2796 | { |
@@ -2595,6 +2866,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | |||
2595 | } | 2866 | } |
2596 | pci_do_fixups(dev, start, end); | 2867 | pci_do_fixups(dev, start, end); |
2597 | } | 2868 | } |
2869 | EXPORT_SYMBOL(pci_fixup_device); | ||
2598 | 2870 | ||
2599 | static int __init pci_apply_final_quirks(void) | 2871 | static int __init pci_apply_final_quirks(void) |
2600 | { | 2872 | { |
@@ -2606,7 +2878,7 @@ static int __init pci_apply_final_quirks(void) | |||
2606 | printk(KERN_DEBUG "PCI: CLS %u bytes\n", | 2878 | printk(KERN_DEBUG "PCI: CLS %u bytes\n", |
2607 | pci_cache_line_size << 2); | 2879 | pci_cache_line_size << 2); |
2608 | 2880 | ||
2609 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 2881 | for_each_pci_dev(dev) { |
2610 | pci_fixup_device(pci_fixup_final, dev); | 2882 | pci_fixup_device(pci_fixup_final, dev); |
2611 | /* | 2883 | /* |
2612 | * If arch hasn't set it explicitly yet, use the CLS | 2884 | * If arch hasn't set it explicitly yet, use the CLS |
@@ -2706,9 +2978,3 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) | |||
2706 | 2978 | ||
2707 | return -ENOTTY; | 2979 | return -ENOTTY; |
2708 | } | 2980 | } |
2709 | |||
2710 | #else | ||
2711 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} | ||
2712 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; } | ||
2713 | #endif | ||
2714 | EXPORT_SYMBOL(pci_fixup_device); | ||
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 4a471dc4f4b9..9d75dc8ca602 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
14 | #include "pci.h" | 15 | #include "pci.h" |
@@ -168,7 +169,7 @@ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, | |||
168 | { | 169 | { |
169 | struct pci_dev *dev = NULL; | 170 | struct pci_dev *dev = NULL; |
170 | 171 | ||
171 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 172 | for_each_pci_dev(dev) { |
172 | if (pci_domain_nr(dev->bus) == domain && | 173 | if (pci_domain_nr(dev->bus) == domain && |
173 | (dev->bus->number == bus && dev->devfn == devfn)) | 174 | (dev->bus->number == bus && dev->devfn == devfn)) |
174 | return dev; | 175 | return dev; |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index c48cd377b3f5..66cb8f4cc5f4 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -27,37 +27,91 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include "pci.h" | 28 | #include "pci.h" |
29 | 29 | ||
30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) | 30 | struct resource_list_x { |
31 | { | 31 | struct resource_list_x *next; |
32 | struct pci_dev *dev; | ||
33 | struct resource *res; | 32 | struct resource *res; |
34 | struct resource_list head, *list, *tmp; | 33 | struct pci_dev *dev; |
35 | int idx; | 34 | resource_size_t start; |
35 | resource_size_t end; | ||
36 | unsigned long flags; | ||
37 | }; | ||
36 | 38 | ||
37 | head.next = NULL; | 39 | static void add_to_failed_list(struct resource_list_x *head, |
38 | list_for_each_entry(dev, &bus->devices, bus_list) { | 40 | struct pci_dev *dev, struct resource *res) |
39 | u16 class = dev->class >> 8; | 41 | { |
42 | struct resource_list_x *list = head; | ||
43 | struct resource_list_x *ln = list->next; | ||
44 | struct resource_list_x *tmp; | ||
40 | 45 | ||
41 | /* Don't touch classless devices or host bridges or ioapics. */ | 46 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); |
42 | if (class == PCI_CLASS_NOT_DEFINED || | 47 | if (!tmp) { |
43 | class == PCI_CLASS_BRIDGE_HOST) | 48 | pr_warning("add_to_failed_list: kmalloc() failed!\n"); |
44 | continue; | 49 | return; |
50 | } | ||
45 | 51 | ||
46 | /* Don't touch ioapic devices already enabled by firmware */ | 52 | tmp->next = ln; |
47 | if (class == PCI_CLASS_SYSTEM_PIC) { | 53 | tmp->res = res; |
48 | u16 command; | 54 | tmp->dev = dev; |
49 | pci_read_config_word(dev, PCI_COMMAND, &command); | 55 | tmp->start = res->start; |
50 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | 56 | tmp->end = res->end; |
51 | continue; | 57 | tmp->flags = res->flags; |
52 | } | 58 | list->next = tmp; |
59 | } | ||
60 | |||
61 | static void free_failed_list(struct resource_list_x *head) | ||
62 | { | ||
63 | struct resource_list_x *list, *tmp; | ||
53 | 64 | ||
54 | pdev_sort_resources(dev, &head); | 65 | for (list = head->next; list;) { |
66 | tmp = list; | ||
67 | list = list->next; | ||
68 | kfree(tmp); | ||
55 | } | 69 | } |
56 | 70 | ||
57 | for (list = head.next; list;) { | 71 | head->next = NULL; |
72 | } | ||
73 | |||
74 | static void __dev_sort_resources(struct pci_dev *dev, | ||
75 | struct resource_list *head) | ||
76 | { | ||
77 | u16 class = dev->class >> 8; | ||
78 | |||
79 | /* Don't touch classless devices or host bridges or ioapics. */ | ||
80 | if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) | ||
81 | return; | ||
82 | |||
83 | /* Don't touch ioapic devices already enabled by firmware */ | ||
84 | if (class == PCI_CLASS_SYSTEM_PIC) { | ||
85 | u16 command; | ||
86 | pci_read_config_word(dev, PCI_COMMAND, &command); | ||
87 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | ||
88 | return; | ||
89 | } | ||
90 | |||
91 | pdev_sort_resources(dev, head); | ||
92 | } | ||
93 | |||
94 | static void __assign_resources_sorted(struct resource_list *head, | ||
95 | struct resource_list_x *fail_head) | ||
96 | { | ||
97 | struct resource *res; | ||
98 | struct resource_list *list, *tmp; | ||
99 | int idx; | ||
100 | |||
101 | for (list = head->next; list;) { | ||
58 | res = list->res; | 102 | res = list->res; |
59 | idx = res - &list->dev->resource[0]; | 103 | idx = res - &list->dev->resource[0]; |
104 | |||
60 | if (pci_assign_resource(list->dev, idx)) { | 105 | if (pci_assign_resource(list->dev, idx)) { |
106 | if (fail_head && !pci_is_root_bus(list->dev->bus)) { | ||
107 | /* | ||
108 | * if the failed res is for ROM BAR, and it will | ||
109 | * be enabled later, don't add it to the list | ||
110 | */ | ||
111 | if (!((idx == PCI_ROM_RESOURCE) && | ||
112 | (!(res->flags & IORESOURCE_ROM_ENABLE)))) | ||
113 | add_to_failed_list(fail_head, list->dev, res); | ||
114 | } | ||
61 | res->start = 0; | 115 | res->start = 0; |
62 | res->end = 0; | 116 | res->end = 0; |
63 | res->flags = 0; | 117 | res->flags = 0; |
@@ -68,6 +122,30 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus) | |||
68 | } | 122 | } |
69 | } | 123 | } |
70 | 124 | ||
125 | static void pdev_assign_resources_sorted(struct pci_dev *dev, | ||
126 | struct resource_list_x *fail_head) | ||
127 | { | ||
128 | struct resource_list head; | ||
129 | |||
130 | head.next = NULL; | ||
131 | __dev_sort_resources(dev, &head); | ||
132 | __assign_resources_sorted(&head, fail_head); | ||
133 | |||
134 | } | ||
135 | |||
136 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | ||
137 | struct resource_list_x *fail_head) | ||
138 | { | ||
139 | struct pci_dev *dev; | ||
140 | struct resource_list head; | ||
141 | |||
142 | head.next = NULL; | ||
143 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
144 | __dev_sort_resources(dev, &head); | ||
145 | |||
146 | __assign_resources_sorted(&head, fail_head); | ||
147 | } | ||
148 | |||
71 | void pci_setup_cardbus(struct pci_bus *bus) | 149 | void pci_setup_cardbus(struct pci_bus *bus) |
72 | { | 150 | { |
73 | struct pci_dev *bridge = bus->self; | 151 | struct pci_dev *bridge = bus->self; |
@@ -134,18 +212,12 @@ EXPORT_SYMBOL(pci_setup_cardbus); | |||
134 | config space writes, so it's quite possible that an I/O window of | 212 | config space writes, so it's quite possible that an I/O window of |
135 | the bridge will have some undesirable address (e.g. 0) after the | 213 | the bridge will have some undesirable address (e.g. 0) after the |
136 | first write. Ditto 64-bit prefetchable MMIO. */ | 214 | first write. Ditto 64-bit prefetchable MMIO. */ |
137 | static void pci_setup_bridge(struct pci_bus *bus) | 215 | static void pci_setup_bridge_io(struct pci_bus *bus) |
138 | { | 216 | { |
139 | struct pci_dev *bridge = bus->self; | 217 | struct pci_dev *bridge = bus->self; |
140 | struct resource *res; | 218 | struct resource *res; |
141 | struct pci_bus_region region; | 219 | struct pci_bus_region region; |
142 | u32 l, bu, lu, io_upper16; | 220 | u32 l, io_upper16; |
143 | |||
144 | if (pci_is_enabled(bridge)) | ||
145 | return; | ||
146 | |||
147 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | ||
148 | bus->secondary, bus->subordinate); | ||
149 | 221 | ||
150 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 222 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ |
151 | res = bus->resource[0]; | 223 | res = bus->resource[0]; |
@@ -158,8 +230,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
158 | /* Set up upper 16 bits of I/O base/limit. */ | 230 | /* Set up upper 16 bits of I/O base/limit. */ |
159 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 231 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
160 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 232 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
161 | } | 233 | } else { |
162 | else { | ||
163 | /* Clear upper 16 bits of I/O base/limit. */ | 234 | /* Clear upper 16 bits of I/O base/limit. */ |
164 | io_upper16 = 0; | 235 | io_upper16 = 0; |
165 | l = 0x00f0; | 236 | l = 0x00f0; |
@@ -171,21 +242,35 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
171 | pci_write_config_dword(bridge, PCI_IO_BASE, l); | 242 | pci_write_config_dword(bridge, PCI_IO_BASE, l); |
172 | /* Update upper 16 bits of I/O base/limit. */ | 243 | /* Update upper 16 bits of I/O base/limit. */ |
173 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 244 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
245 | } | ||
174 | 246 | ||
175 | /* Set up the top and bottom of the PCI Memory segment | 247 | static void pci_setup_bridge_mmio(struct pci_bus *bus) |
176 | for this bus. */ | 248 | { |
249 | struct pci_dev *bridge = bus->self; | ||
250 | struct resource *res; | ||
251 | struct pci_bus_region region; | ||
252 | u32 l; | ||
253 | |||
254 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | ||
177 | res = bus->resource[1]; | 255 | res = bus->resource[1]; |
178 | pcibios_resource_to_bus(bridge, ®ion, res); | 256 | pcibios_resource_to_bus(bridge, ®ion, res); |
179 | if (res->flags & IORESOURCE_MEM) { | 257 | if (res->flags & IORESOURCE_MEM) { |
180 | l = (region.start >> 16) & 0xfff0; | 258 | l = (region.start >> 16) & 0xfff0; |
181 | l |= region.end & 0xfff00000; | 259 | l |= region.end & 0xfff00000; |
182 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 260 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
183 | } | 261 | } else { |
184 | else { | ||
185 | l = 0x0000fff0; | 262 | l = 0x0000fff0; |
186 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); | 263 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); |
187 | } | 264 | } |
188 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); | 265 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); |
266 | } | ||
267 | |||
268 | static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) | ||
269 | { | ||
270 | struct pci_dev *bridge = bus->self; | ||
271 | struct resource *res; | ||
272 | struct pci_bus_region region; | ||
273 | u32 l, bu, lu; | ||
189 | 274 | ||
190 | /* Clear out the upper 32 bits of PREF limit. | 275 | /* Clear out the upper 32 bits of PREF limit. |
191 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily | 276 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily |
@@ -204,8 +289,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
204 | lu = upper_32_bits(region.end); | 289 | lu = upper_32_bits(region.end); |
205 | } | 290 | } |
206 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 291 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
207 | } | 292 | } else { |
208 | else { | ||
209 | l = 0x0000fff0; | 293 | l = 0x0000fff0; |
210 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); | 294 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); |
211 | } | 295 | } |
@@ -214,10 +298,35 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
214 | /* Set the upper 32 bits of PREF base & limit. */ | 298 | /* Set the upper 32 bits of PREF base & limit. */ |
215 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | 299 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); |
216 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | 300 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); |
301 | } | ||
302 | |||
303 | static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) | ||
304 | { | ||
305 | struct pci_dev *bridge = bus->self; | ||
306 | |||
307 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | ||
308 | bus->secondary, bus->subordinate); | ||
309 | |||
310 | if (type & IORESOURCE_IO) | ||
311 | pci_setup_bridge_io(bus); | ||
312 | |||
313 | if (type & IORESOURCE_MEM) | ||
314 | pci_setup_bridge_mmio(bus); | ||
315 | |||
316 | if (type & IORESOURCE_PREFETCH) | ||
317 | pci_setup_bridge_mmio_pref(bus); | ||
217 | 318 | ||
218 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 319 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
219 | } | 320 | } |
220 | 321 | ||
322 | static void pci_setup_bridge(struct pci_bus *bus) | ||
323 | { | ||
324 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | | ||
325 | IORESOURCE_PREFETCH; | ||
326 | |||
327 | __pci_setup_bridge(bus, type); | ||
328 | } | ||
329 | |||
221 | /* Check whether the bridge supports optional I/O and | 330 | /* Check whether the bridge supports optional I/O and |
222 | prefetchable memory ranges. If not, the respective | 331 | prefetchable memory ranges. If not, the respective |
223 | base/limit registers must be read-only and read as 0. */ | 332 | base/limit registers must be read-only and read as 0. */ |
@@ -253,8 +362,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) | |||
253 | } | 362 | } |
254 | if (pmem) { | 363 | if (pmem) { |
255 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; | 364 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; |
256 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) | 365 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == |
366 | PCI_PREF_RANGE_TYPE_64) { | ||
257 | b_res[2].flags |= IORESOURCE_MEM_64; | 367 | b_res[2].flags |= IORESOURCE_MEM_64; |
368 | b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; | ||
369 | } | ||
258 | } | 370 | } |
259 | 371 | ||
260 | /* double check if bridge does support 64 bit pref */ | 372 | /* double check if bridge does support 64 bit pref */ |
@@ -283,8 +395,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon | |||
283 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | 395 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | |
284 | IORESOURCE_PREFETCH; | 396 | IORESOURCE_PREFETCH; |
285 | 397 | ||
286 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 398 | pci_bus_for_each_resource(bus, r, i) { |
287 | r = bus->resource[i]; | ||
288 | if (r == &ioport_resource || r == &iomem_resource) | 399 | if (r == &ioport_resource || r == &iomem_resource) |
289 | continue; | 400 | continue; |
290 | if (r && (r->flags & type_mask) == type && !r->parent) | 401 | if (r && (r->flags & type_mask) == type && !r->parent) |
@@ -301,7 +412,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
301 | { | 412 | { |
302 | struct pci_dev *dev; | 413 | struct pci_dev *dev; |
303 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 414 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
304 | unsigned long size = 0, size1 = 0; | 415 | unsigned long size = 0, size1 = 0, old_size; |
305 | 416 | ||
306 | if (!b_res) | 417 | if (!b_res) |
307 | return; | 418 | return; |
@@ -326,12 +437,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
326 | } | 437 | } |
327 | if (size < min_size) | 438 | if (size < min_size) |
328 | size = min_size; | 439 | size = min_size; |
440 | old_size = resource_size(b_res); | ||
441 | if (old_size == 1) | ||
442 | old_size = 0; | ||
329 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | 443 | /* To be fixed in 2.5: we should have sort of HAVE_ISA |
330 | flag in the struct pci_bus. */ | 444 | flag in the struct pci_bus. */ |
331 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 445 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
332 | size = (size & 0xff) + ((size & ~0xffUL) << 2); | 446 | size = (size & 0xff) + ((size & ~0xffUL) << 2); |
333 | #endif | 447 | #endif |
334 | size = ALIGN(size + size1, 4096); | 448 | size = ALIGN(size + size1, 4096); |
449 | if (size < old_size) | ||
450 | size = old_size; | ||
335 | if (!size) { | 451 | if (!size) { |
336 | if (b_res->start || b_res->end) | 452 | if (b_res->start || b_res->end) |
337 | dev_info(&bus->self->dev, "disabling bridge window " | 453 | dev_info(&bus->self->dev, "disabling bridge window " |
@@ -352,7 +468,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
352 | unsigned long type, resource_size_t min_size) | 468 | unsigned long type, resource_size_t min_size) |
353 | { | 469 | { |
354 | struct pci_dev *dev; | 470 | struct pci_dev *dev; |
355 | resource_size_t min_align, align, size; | 471 | resource_size_t min_align, align, size, old_size; |
356 | resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ | 472 | resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ |
357 | int order, max_order; | 473 | int order, max_order; |
358 | struct resource *b_res = find_free_bus_resource(bus, type); | 474 | struct resource *b_res = find_free_bus_resource(bus, type); |
@@ -402,6 +518,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
402 | } | 518 | } |
403 | if (size < min_size) | 519 | if (size < min_size) |
404 | size = min_size; | 520 | size = min_size; |
521 | old_size = resource_size(b_res); | ||
522 | if (old_size == 1) | ||
523 | old_size = 0; | ||
524 | if (size < old_size) | ||
525 | size = old_size; | ||
405 | 526 | ||
406 | align = 0; | 527 | align = 0; |
407 | min_align = 0; | 528 | min_align = 0; |
@@ -538,23 +659,25 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
538 | } | 659 | } |
539 | EXPORT_SYMBOL(pci_bus_size_bridges); | 660 | EXPORT_SYMBOL(pci_bus_size_bridges); |
540 | 661 | ||
541 | void __ref pci_bus_assign_resources(const struct pci_bus *bus) | 662 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, |
663 | struct resource_list_x *fail_head) | ||
542 | { | 664 | { |
543 | struct pci_bus *b; | 665 | struct pci_bus *b; |
544 | struct pci_dev *dev; | 666 | struct pci_dev *dev; |
545 | 667 | ||
546 | pbus_assign_resources_sorted(bus); | 668 | pbus_assign_resources_sorted(bus, fail_head); |
547 | 669 | ||
548 | list_for_each_entry(dev, &bus->devices, bus_list) { | 670 | list_for_each_entry(dev, &bus->devices, bus_list) { |
549 | b = dev->subordinate; | 671 | b = dev->subordinate; |
550 | if (!b) | 672 | if (!b) |
551 | continue; | 673 | continue; |
552 | 674 | ||
553 | pci_bus_assign_resources(b); | 675 | __pci_bus_assign_resources(b, fail_head); |
554 | 676 | ||
555 | switch (dev->class >> 8) { | 677 | switch (dev->class >> 8) { |
556 | case PCI_CLASS_BRIDGE_PCI: | 678 | case PCI_CLASS_BRIDGE_PCI: |
557 | pci_setup_bridge(b); | 679 | if (!pci_is_enabled(dev)) |
680 | pci_setup_bridge(b); | ||
558 | break; | 681 | break; |
559 | 682 | ||
560 | case PCI_CLASS_BRIDGE_CARDBUS: | 683 | case PCI_CLASS_BRIDGE_CARDBUS: |
@@ -568,15 +691,130 @@ void __ref pci_bus_assign_resources(const struct pci_bus *bus) | |||
568 | } | 691 | } |
569 | } | 692 | } |
570 | } | 693 | } |
694 | |||
695 | void __ref pci_bus_assign_resources(const struct pci_bus *bus) | ||
696 | { | ||
697 | __pci_bus_assign_resources(bus, NULL); | ||
698 | } | ||
571 | EXPORT_SYMBOL(pci_bus_assign_resources); | 699 | EXPORT_SYMBOL(pci_bus_assign_resources); |
572 | 700 | ||
701 | static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, | ||
702 | struct resource_list_x *fail_head) | ||
703 | { | ||
704 | struct pci_bus *b; | ||
705 | |||
706 | pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head); | ||
707 | |||
708 | b = bridge->subordinate; | ||
709 | if (!b) | ||
710 | return; | ||
711 | |||
712 | __pci_bus_assign_resources(b, fail_head); | ||
713 | |||
714 | switch (bridge->class >> 8) { | ||
715 | case PCI_CLASS_BRIDGE_PCI: | ||
716 | pci_setup_bridge(b); | ||
717 | break; | ||
718 | |||
719 | case PCI_CLASS_BRIDGE_CARDBUS: | ||
720 | pci_setup_cardbus(b); | ||
721 | break; | ||
722 | |||
723 | default: | ||
724 | dev_info(&bridge->dev, "not setting up bridge for bus " | ||
725 | "%04x:%02x\n", pci_domain_nr(b), b->number); | ||
726 | break; | ||
727 | } | ||
728 | } | ||
729 | static void pci_bridge_release_resources(struct pci_bus *bus, | ||
730 | unsigned long type) | ||
731 | { | ||
732 | int idx; | ||
733 | bool changed = false; | ||
734 | struct pci_dev *dev; | ||
735 | struct resource *r; | ||
736 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
737 | IORESOURCE_PREFETCH; | ||
738 | |||
739 | dev = bus->self; | ||
740 | for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END; | ||
741 | idx++) { | ||
742 | r = &dev->resource[idx]; | ||
743 | if ((r->flags & type_mask) != type) | ||
744 | continue; | ||
745 | if (!r->parent) | ||
746 | continue; | ||
747 | /* | ||
748 | * if there are children under that, we should release them | ||
749 | * all | ||
750 | */ | ||
751 | release_child_resources(r); | ||
752 | if (!release_resource(r)) { | ||
753 | dev_printk(KERN_DEBUG, &dev->dev, | ||
754 | "resource %d %pR released\n", idx, r); | ||
755 | /* keep the old size */ | ||
756 | r->end = resource_size(r) - 1; | ||
757 | r->start = 0; | ||
758 | r->flags = 0; | ||
759 | changed = true; | ||
760 | } | ||
761 | } | ||
762 | |||
763 | if (changed) { | ||
764 | /* avoiding touch the one without PREF */ | ||
765 | if (type & IORESOURCE_PREFETCH) | ||
766 | type = IORESOURCE_PREFETCH; | ||
767 | __pci_setup_bridge(bus, type); | ||
768 | } | ||
769 | } | ||
770 | |||
771 | enum release_type { | ||
772 | leaf_only, | ||
773 | whole_subtree, | ||
774 | }; | ||
775 | /* | ||
776 | * try to release pci bridge resources that is from leaf bridge, | ||
777 | * so we can allocate big new one later | ||
778 | */ | ||
779 | static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus, | ||
780 | unsigned long type, | ||
781 | enum release_type rel_type) | ||
782 | { | ||
783 | struct pci_dev *dev; | ||
784 | bool is_leaf_bridge = true; | ||
785 | |||
786 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
787 | struct pci_bus *b = dev->subordinate; | ||
788 | if (!b) | ||
789 | continue; | ||
790 | |||
791 | is_leaf_bridge = false; | ||
792 | |||
793 | if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
794 | continue; | ||
795 | |||
796 | if (rel_type == whole_subtree) | ||
797 | pci_bus_release_bridge_resources(b, type, | ||
798 | whole_subtree); | ||
799 | } | ||
800 | |||
801 | if (pci_is_root_bus(bus)) | ||
802 | return; | ||
803 | |||
804 | if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
805 | return; | ||
806 | |||
807 | if ((rel_type == whole_subtree) || is_leaf_bridge) | ||
808 | pci_bridge_release_resources(bus, type); | ||
809 | } | ||
810 | |||
573 | static void pci_bus_dump_res(struct pci_bus *bus) | 811 | static void pci_bus_dump_res(struct pci_bus *bus) |
574 | { | 812 | { |
575 | int i; | 813 | struct resource *res; |
814 | int i; | ||
576 | 815 | ||
577 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 816 | pci_bus_for_each_resource(bus, res, i) { |
578 | struct resource *res = bus->resource[i]; | 817 | if (!res || !res->end || !res->flags) |
579 | if (!res || !res->end) | ||
580 | continue; | 818 | continue; |
581 | 819 | ||
582 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); | 820 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); |
@@ -621,3 +859,67 @@ pci_assign_unassigned_resources(void) | |||
621 | pci_bus_dump_resources(bus); | 859 | pci_bus_dump_resources(bus); |
622 | } | 860 | } |
623 | } | 861 | } |
862 | |||
863 | void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) | ||
864 | { | ||
865 | struct pci_bus *parent = bridge->subordinate; | ||
866 | int tried_times = 0; | ||
867 | struct resource_list_x head, *list; | ||
868 | int retval; | ||
869 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
870 | IORESOURCE_PREFETCH; | ||
871 | |||
872 | head.next = NULL; | ||
873 | |||
874 | again: | ||
875 | pci_bus_size_bridges(parent); | ||
876 | __pci_bridge_assign_resources(bridge, &head); | ||
877 | |||
878 | tried_times++; | ||
879 | |||
880 | if (!head.next) | ||
881 | goto enable_all; | ||
882 | |||
883 | if (tried_times >= 2) { | ||
884 | /* still fail, don't need to try more */ | ||
885 | free_failed_list(&head); | ||
886 | goto enable_all; | ||
887 | } | ||
888 | |||
889 | printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", | ||
890 | tried_times + 1); | ||
891 | |||
892 | /* | ||
893 | * Try to release leaf bridge's resources that doesn't fit resource of | ||
894 | * child device under that bridge | ||
895 | */ | ||
896 | for (list = head.next; list;) { | ||
897 | struct pci_bus *bus = list->dev->bus; | ||
898 | unsigned long flags = list->flags; | ||
899 | |||
900 | pci_bus_release_bridge_resources(bus, flags & type_mask, | ||
901 | whole_subtree); | ||
902 | list = list->next; | ||
903 | } | ||
904 | /* restore size and flags */ | ||
905 | for (list = head.next; list;) { | ||
906 | struct resource *res = list->res; | ||
907 | |||
908 | res->start = list->start; | ||
909 | res->end = list->end; | ||
910 | res->flags = list->flags; | ||
911 | if (list->dev->subordinate) | ||
912 | res->flags = 0; | ||
913 | |||
914 | list = list->next; | ||
915 | } | ||
916 | free_failed_list(&head); | ||
917 | |||
918 | goto again; | ||
919 | |||
920 | enable_all: | ||
921 | retval = pci_reenable_device(bridge); | ||
922 | pci_set_master(bridge); | ||
923 | pci_enable_bridges(parent); | ||
924 | } | ||
925 | EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); | ||
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index aa795fd428de..eec9738f3492 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c | |||
@@ -59,7 +59,6 @@ pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), | |||
59 | int (*map_irq)(struct pci_dev *, u8, u8)) | 59 | int (*map_irq)(struct pci_dev *, u8, u8)) |
60 | { | 60 | { |
61 | struct pci_dev *dev = NULL; | 61 | struct pci_dev *dev = NULL; |
62 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 62 | for_each_pci_dev(dev) |
63 | pdev_fixup_irq(dev, swizzle, map_irq); | 63 | pdev_fixup_irq(dev, swizzle, map_irq); |
64 | } | ||
65 | } | 64 | } |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7d678bb15ffb..bc0e6eea0fff 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -85,7 +85,7 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
85 | } | 85 | } |
86 | } | 86 | } |
87 | res->flags &= ~IORESOURCE_UNSET; | 87 | res->flags &= ~IORESOURCE_UNSET; |
88 | dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n", | 88 | dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n", |
89 | resno, res, (unsigned long long)region.start, | 89 | resno, res, (unsigned long long)region.start, |
90 | (unsigned long long)region.end); | 90 | (unsigned long long)region.end); |
91 | } | 91 | } |
@@ -93,22 +93,24 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
93 | int pci_claim_resource(struct pci_dev *dev, int resource) | 93 | int pci_claim_resource(struct pci_dev *dev, int resource) |
94 | { | 94 | { |
95 | struct resource *res = &dev->resource[resource]; | 95 | struct resource *res = &dev->resource[resource]; |
96 | struct resource *root; | 96 | struct resource *root, *conflict; |
97 | int err; | ||
98 | 97 | ||
99 | root = pci_find_parent_resource(dev, res); | 98 | root = pci_find_parent_resource(dev, res); |
100 | if (!root) { | 99 | if (!root) { |
101 | dev_err(&dev->dev, "no compatible bridge window for %pR\n", | 100 | dev_info(&dev->dev, "no compatible bridge window for %pR\n", |
102 | res); | 101 | res); |
103 | return -EINVAL; | 102 | return -EINVAL; |
104 | } | 103 | } |
105 | 104 | ||
106 | err = request_resource(root, res); | 105 | conflict = request_resource_conflict(root, res); |
107 | if (err) | 106 | if (conflict) { |
108 | dev_err(&dev->dev, | 107 | dev_info(&dev->dev, |
109 | "address space collision: %pR already in use\n", res); | 108 | "address space collision: %pR conflicts with %s %pR\n", |
109 | res, conflict->name, conflict); | ||
110 | return -EBUSY; | ||
111 | } | ||
110 | 112 | ||
111 | return err; | 113 | return 0; |
112 | } | 114 | } |
113 | EXPORT_SYMBOL(pci_claim_resource); | 115 | EXPORT_SYMBOL(pci_claim_resource); |
114 | 116 | ||
@@ -154,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
154 | pcibios_align_resource, dev); | 156 | pcibios_align_resource, dev); |
155 | } | 157 | } |
156 | 158 | ||
159 | if (ret < 0 && dev->fw_addr[resno]) { | ||
160 | struct resource *root, *conflict; | ||
161 | resource_size_t start, end; | ||
162 | |||
163 | /* | ||
164 | * If we failed to assign anything, let's try the address | ||
165 | * where firmware left it. That at least has a chance of | ||
166 | * working, which is better than just leaving it disabled. | ||
167 | */ | ||
168 | |||
169 | if (res->flags & IORESOURCE_IO) | ||
170 | root = &ioport_resource; | ||
171 | else | ||
172 | root = &iomem_resource; | ||
173 | |||
174 | start = res->start; | ||
175 | end = res->end; | ||
176 | res->start = dev->fw_addr[resno]; | ||
177 | res->end = res->start + size - 1; | ||
178 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
179 | resno, res); | ||
180 | conflict = request_resource_conflict(root, res); | ||
181 | if (conflict) { | ||
182 | dev_info(&dev->dev, | ||
183 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
184 | res, conflict->name, conflict); | ||
185 | res->start = start; | ||
186 | res->end = end; | ||
187 | } else | ||
188 | ret = 0; | ||
189 | } | ||
190 | |||
157 | if (!ret) { | 191 | if (!ret) { |
158 | res->flags &= ~IORESOURCE_STARTALIGN; | 192 | res->flags &= ~IORESOURCE_STARTALIGN; |
159 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | 193 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); |
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 8c02b6c53bdb..968cfea04f74 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/kobject.h> | 8 | #include <linux/kobject.h> |
9 | #include <linux/slab.h> | ||
9 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
10 | #include <linux/err.h> | 11 | #include <linux/err.h> |
11 | #include "pci.h" | 12 | #include "pci.h" |
@@ -29,7 +30,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj, | |||
29 | return attribute->store ? attribute->store(slot, buf, len) : -EIO; | 30 | return attribute->store ? attribute->store(slot, buf, len) : -EIO; |
30 | } | 31 | } |
31 | 32 | ||
32 | static struct sysfs_ops pci_slot_sysfs_ops = { | 33 | static const struct sysfs_ops pci_slot_sysfs_ops = { |
33 | .show = pci_slot_attr_show, | 34 | .show = pci_slot_attr_show, |
34 | .store = pci_slot_attr_store, | 35 | .store = pci_slot_attr_store, |
35 | }; | 36 | }; |
@@ -47,6 +48,55 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) | |||
47 | slot->number); | 48 | slot->number); |
48 | } | 49 | } |
49 | 50 | ||
51 | /* these strings match up with the values in pci_bus_speed */ | ||
52 | static const char *pci_bus_speed_strings[] = { | ||
53 | "33 MHz PCI", /* 0x00 */ | ||
54 | "66 MHz PCI", /* 0x01 */ | ||
55 | "66 MHz PCI-X", /* 0x02 */ | ||
56 | "100 MHz PCI-X", /* 0x03 */ | ||
57 | "133 MHz PCI-X", /* 0x04 */ | ||
58 | NULL, /* 0x05 */ | ||
59 | NULL, /* 0x06 */ | ||
60 | NULL, /* 0x07 */ | ||
61 | NULL, /* 0x08 */ | ||
62 | "66 MHz PCI-X 266", /* 0x09 */ | ||
63 | "100 MHz PCI-X 266", /* 0x0a */ | ||
64 | "133 MHz PCI-X 266", /* 0x0b */ | ||
65 | "Unknown AGP", /* 0x0c */ | ||
66 | "1x AGP", /* 0x0d */ | ||
67 | "2x AGP", /* 0x0e */ | ||
68 | "4x AGP", /* 0x0f */ | ||
69 | "8x AGP", /* 0x10 */ | ||
70 | "66 MHz PCI-X 533", /* 0x11 */ | ||
71 | "100 MHz PCI-X 533", /* 0x12 */ | ||
72 | "133 MHz PCI-X 533", /* 0x13 */ | ||
73 | "2.5 GT/s PCIe", /* 0x14 */ | ||
74 | "5.0 GT/s PCIe", /* 0x15 */ | ||
75 | "8.0 GT/s PCIe", /* 0x16 */ | ||
76 | }; | ||
77 | |||
78 | static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) | ||
79 | { | ||
80 | const char *speed_string; | ||
81 | |||
82 | if (speed < ARRAY_SIZE(pci_bus_speed_strings)) | ||
83 | speed_string = pci_bus_speed_strings[speed]; | ||
84 | else | ||
85 | speed_string = "Unknown"; | ||
86 | |||
87 | return sprintf(buf, "%s\n", speed_string); | ||
88 | } | ||
89 | |||
90 | static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) | ||
91 | { | ||
92 | return bus_speed_read(slot->bus->max_bus_speed, buf); | ||
93 | } | ||
94 | |||
95 | static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf) | ||
96 | { | ||
97 | return bus_speed_read(slot->bus->cur_bus_speed, buf); | ||
98 | } | ||
99 | |||
50 | static void pci_slot_release(struct kobject *kobj) | 100 | static void pci_slot_release(struct kobject *kobj) |
51 | { | 101 | { |
52 | struct pci_dev *dev; | 102 | struct pci_dev *dev; |
@@ -66,9 +116,15 @@ static void pci_slot_release(struct kobject *kobj) | |||
66 | 116 | ||
67 | static struct pci_slot_attribute pci_slot_attr_address = | 117 | static struct pci_slot_attribute pci_slot_attr_address = |
68 | __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); | 118 | __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); |
119 | static struct pci_slot_attribute pci_slot_attr_max_speed = | ||
120 | __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL); | ||
121 | static struct pci_slot_attribute pci_slot_attr_cur_speed = | ||
122 | __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL); | ||
69 | 123 | ||
70 | static struct attribute *pci_slot_default_attrs[] = { | 124 | static struct attribute *pci_slot_default_attrs[] = { |
71 | &pci_slot_attr_address.attr, | 125 | &pci_slot_attr_address.attr, |
126 | &pci_slot_attr_max_speed.attr, | ||
127 | &pci_slot_attr_cur_speed.attr, | ||
72 | NULL, | 128 | NULL, |
73 | }; | 129 | }; |
74 | 130 | ||
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c new file mode 100644 index 000000000000..a5a5ca17cfe6 --- /dev/null +++ b/drivers/pci/vpd.c | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * File: vpd.c | ||
3 | * Purpose: Provide PCI VPD support | ||
4 | * | ||
5 | * Copyright (C) 2010 Broadcom Corporation. | ||
6 | */ | ||
7 | |||
8 | #include <linux/pci.h> | ||
9 | |||
10 | int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt) | ||
11 | { | ||
12 | int i; | ||
13 | |||
14 | for (i = off; i < len; ) { | ||
15 | u8 val = buf[i]; | ||
16 | |||
17 | if (val & PCI_VPD_LRDT) { | ||
18 | /* Don't return success of the tag isn't complete */ | ||
19 | if (i + PCI_VPD_LRDT_TAG_SIZE > len) | ||
20 | break; | ||
21 | |||
22 | if (val == rdt) | ||
23 | return i; | ||
24 | |||
25 | i += PCI_VPD_LRDT_TAG_SIZE + | ||
26 | pci_vpd_lrdt_size(&buf[i]); | ||
27 | } else { | ||
28 | u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK; | ||
29 | |||
30 | if (tag == rdt) | ||
31 | return i; | ||
32 | |||
33 | if (tag == PCI_VPD_SRDT_END) | ||
34 | break; | ||
35 | |||
36 | i += PCI_VPD_SRDT_TAG_SIZE + | ||
37 | pci_vpd_srdt_size(&buf[i]); | ||
38 | } | ||
39 | } | ||
40 | |||
41 | return -ENOENT; | ||
42 | } | ||
43 | EXPORT_SYMBOL_GPL(pci_vpd_find_tag); | ||
44 | |||
45 | int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, | ||
46 | unsigned int len, const char *kw) | ||
47 | { | ||
48 | int i; | ||
49 | |||
50 | for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) { | ||
51 | if (buf[i + 0] == kw[0] && | ||
52 | buf[i + 1] == kw[1]) | ||
53 | return i; | ||
54 | |||
55 | i += PCI_VPD_INFO_FLD_HDR_SIZE + | ||
56 | pci_vpd_info_field_size(&buf[i]); | ||
57 | } | ||
58 | |||
59 | return -ENOENT; | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword); | ||
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c new file mode 100644 index 000000000000..3a5a6fcc0ead --- /dev/null +++ b/drivers/pci/xen-pcifront.c | |||
@@ -0,0 +1,1148 @@ | |||
1 | /* | ||
2 | * Xen PCI Frontend. | ||
3 | * | ||
4 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | ||
5 | */ | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/mm.h> | ||
9 | #include <xen/xenbus.h> | ||
10 | #include <xen/events.h> | ||
11 | #include <xen/grant_table.h> | ||
12 | #include <xen/page.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/msi.h> | ||
16 | #include <xen/interface/io/pciif.h> | ||
17 | #include <asm/xen/pci.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <linux/workqueue.h> | ||
21 | #include <linux/bitops.h> | ||
22 | #include <linux/time.h> | ||
23 | |||
24 | #define INVALID_GRANT_REF (0) | ||
25 | #define INVALID_EVTCHN (-1) | ||
26 | |||
27 | struct pci_bus_entry { | ||
28 | struct list_head list; | ||
29 | struct pci_bus *bus; | ||
30 | }; | ||
31 | |||
32 | #define _PDEVB_op_active (0) | ||
33 | #define PDEVB_op_active (1 << (_PDEVB_op_active)) | ||
34 | |||
35 | struct pcifront_device { | ||
36 | struct xenbus_device *xdev; | ||
37 | struct list_head root_buses; | ||
38 | |||
39 | int evtchn; | ||
40 | int gnt_ref; | ||
41 | |||
42 | int irq; | ||
43 | |||
44 | /* Lock this when doing any operations in sh_info */ | ||
45 | spinlock_t sh_info_lock; | ||
46 | struct xen_pci_sharedinfo *sh_info; | ||
47 | struct work_struct op_work; | ||
48 | unsigned long flags; | ||
49 | |||
50 | }; | ||
51 | |||
52 | struct pcifront_sd { | ||
53 | int domain; | ||
54 | struct pcifront_device *pdev; | ||
55 | }; | ||
56 | |||
57 | static inline struct pcifront_device * | ||
58 | pcifront_get_pdev(struct pcifront_sd *sd) | ||
59 | { | ||
60 | return sd->pdev; | ||
61 | } | ||
62 | |||
63 | static inline void pcifront_init_sd(struct pcifront_sd *sd, | ||
64 | unsigned int domain, unsigned int bus, | ||
65 | struct pcifront_device *pdev) | ||
66 | { | ||
67 | sd->domain = domain; | ||
68 | sd->pdev = pdev; | ||
69 | } | ||
70 | |||
71 | static DEFINE_SPINLOCK(pcifront_dev_lock); | ||
72 | static struct pcifront_device *pcifront_dev; | ||
73 | |||
74 | static int verbose_request; | ||
75 | module_param(verbose_request, int, 0644); | ||
76 | |||
77 | static int errno_to_pcibios_err(int errno) | ||
78 | { | ||
79 | switch (errno) { | ||
80 | case XEN_PCI_ERR_success: | ||
81 | return PCIBIOS_SUCCESSFUL; | ||
82 | |||
83 | case XEN_PCI_ERR_dev_not_found: | ||
84 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
85 | |||
86 | case XEN_PCI_ERR_invalid_offset: | ||
87 | case XEN_PCI_ERR_op_failed: | ||
88 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
89 | |||
90 | case XEN_PCI_ERR_not_implemented: | ||
91 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
92 | |||
93 | case XEN_PCI_ERR_access_denied: | ||
94 | return PCIBIOS_SET_FAILED; | ||
95 | } | ||
96 | return errno; | ||
97 | } | ||
98 | |||
99 | static inline void schedule_pcifront_aer_op(struct pcifront_device *pdev) | ||
100 | { | ||
101 | if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags) | ||
102 | && !test_and_set_bit(_PDEVB_op_active, &pdev->flags)) { | ||
103 | dev_dbg(&pdev->xdev->dev, "schedule aer frontend job\n"); | ||
104 | schedule_work(&pdev->op_work); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op) | ||
109 | { | ||
110 | int err = 0; | ||
111 | struct xen_pci_op *active_op = &pdev->sh_info->op; | ||
112 | unsigned long irq_flags; | ||
113 | evtchn_port_t port = pdev->evtchn; | ||
114 | unsigned irq = pdev->irq; | ||
115 | s64 ns, ns_timeout; | ||
116 | struct timeval tv; | ||
117 | |||
118 | spin_lock_irqsave(&pdev->sh_info_lock, irq_flags); | ||
119 | |||
120 | memcpy(active_op, op, sizeof(struct xen_pci_op)); | ||
121 | |||
122 | /* Go */ | ||
123 | wmb(); | ||
124 | set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); | ||
125 | notify_remote_via_evtchn(port); | ||
126 | |||
127 | /* | ||
128 | * We set a poll timeout of 3 seconds but give up on return after | ||
129 | * 2 seconds. It is better to time out too late rather than too early | ||
130 | * (in the latter case we end up continually re-executing poll() with a | ||
131 | * timeout in the past). 1s difference gives plenty of slack for error. | ||
132 | */ | ||
133 | do_gettimeofday(&tv); | ||
134 | ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC; | ||
135 | |||
136 | xen_clear_irq_pending(irq); | ||
137 | |||
138 | while (test_bit(_XEN_PCIF_active, | ||
139 | (unsigned long *)&pdev->sh_info->flags)) { | ||
140 | xen_poll_irq_timeout(irq, jiffies + 3*HZ); | ||
141 | xen_clear_irq_pending(irq); | ||
142 | do_gettimeofday(&tv); | ||
143 | ns = timeval_to_ns(&tv); | ||
144 | if (ns > ns_timeout) { | ||
145 | dev_err(&pdev->xdev->dev, | ||
146 | "pciback not responding!!!\n"); | ||
147 | clear_bit(_XEN_PCIF_active, | ||
148 | (unsigned long *)&pdev->sh_info->flags); | ||
149 | err = XEN_PCI_ERR_dev_not_found; | ||
150 | goto out; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * We might lose backend service request since we | ||
156 | * reuse same evtchn with pci_conf backend response. So re-schedule | ||
157 | * aer pcifront service. | ||
158 | */ | ||
159 | if (test_bit(_XEN_PCIB_active, | ||
160 | (unsigned long *)&pdev->sh_info->flags)) { | ||
161 | dev_err(&pdev->xdev->dev, | ||
162 | "schedule aer pcifront service\n"); | ||
163 | schedule_pcifront_aer_op(pdev); | ||
164 | } | ||
165 | |||
166 | memcpy(op, active_op, sizeof(struct xen_pci_op)); | ||
167 | |||
168 | err = op->err; | ||
169 | out: | ||
170 | spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags); | ||
171 | return err; | ||
172 | } | ||
173 | |||
174 | /* Access to this function is spinlocked in drivers/pci/access.c */ | ||
175 | static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn, | ||
176 | int where, int size, u32 *val) | ||
177 | { | ||
178 | int err = 0; | ||
179 | struct xen_pci_op op = { | ||
180 | .cmd = XEN_PCI_OP_conf_read, | ||
181 | .domain = pci_domain_nr(bus), | ||
182 | .bus = bus->number, | ||
183 | .devfn = devfn, | ||
184 | .offset = where, | ||
185 | .size = size, | ||
186 | }; | ||
187 | struct pcifront_sd *sd = bus->sysdata; | ||
188 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
189 | |||
190 | if (verbose_request) | ||
191 | dev_info(&pdev->xdev->dev, | ||
192 | "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n", | ||
193 | pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), | ||
194 | PCI_FUNC(devfn), where, size); | ||
195 | |||
196 | err = do_pci_op(pdev, &op); | ||
197 | |||
198 | if (likely(!err)) { | ||
199 | if (verbose_request) | ||
200 | dev_info(&pdev->xdev->dev, "read got back value %x\n", | ||
201 | op.value); | ||
202 | |||
203 | *val = op.value; | ||
204 | } else if (err == -ENODEV) { | ||
205 | /* No device here, pretend that it just returned 0 */ | ||
206 | err = 0; | ||
207 | *val = 0; | ||
208 | } | ||
209 | |||
210 | return errno_to_pcibios_err(err); | ||
211 | } | ||
212 | |||
213 | /* Access to this function is spinlocked in drivers/pci/access.c */ | ||
214 | static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn, | ||
215 | int where, int size, u32 val) | ||
216 | { | ||
217 | struct xen_pci_op op = { | ||
218 | .cmd = XEN_PCI_OP_conf_write, | ||
219 | .domain = pci_domain_nr(bus), | ||
220 | .bus = bus->number, | ||
221 | .devfn = devfn, | ||
222 | .offset = where, | ||
223 | .size = size, | ||
224 | .value = val, | ||
225 | }; | ||
226 | struct pcifront_sd *sd = bus->sysdata; | ||
227 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
228 | |||
229 | if (verbose_request) | ||
230 | dev_info(&pdev->xdev->dev, | ||
231 | "write dev=%04x:%02x:%02x.%01x - " | ||
232 | "offset %x size %d val %x\n", | ||
233 | pci_domain_nr(bus), bus->number, | ||
234 | PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); | ||
235 | |||
236 | return errno_to_pcibios_err(do_pci_op(pdev, &op)); | ||
237 | } | ||
238 | |||
/* Config-space accessors installed on every bus this frontend creates. */
struct pci_ops pcifront_bus_ops = {
	.read = pcifront_bus_read,
	.write = pcifront_bus_write,
};
243 | |||
244 | #ifdef CONFIG_PCI_MSI | ||
/*
 * Ask the backend to enable MSI-X for @dev.
 *
 * The entry numbers collected from dev->msi_list are shipped to pciback in
 * op.msix_entries[]; on success the backend fills in the vectors, which are
 * copied back into the caller-supplied array *vector.
 *
 * Returns 0 on success, a positive backend status (op.value) if the backend
 * refused, or a negative errno from the shared-ring transaction itself.
 */
static int pci_frontend_enable_msix(struct pci_dev *dev,
				    int **vector, int nvec)
{
	int err;
	int i;
	struct xen_pci_op op = {
		.cmd = XEN_PCI_OP_enable_msix,
		.domain = pci_domain_nr(dev->bus),
		.bus = dev->bus->number,
		.devfn = dev->devfn,
		.value = nvec,
	};
	struct pcifront_sd *sd = dev->bus->sysdata;
	struct pcifront_device *pdev = pcifront_get_pdev(sd);
	struct msi_desc *entry;

	/* The shared page only has room for SH_INFO_MAX_VEC entries. */
	if (nvec > SH_INFO_MAX_VEC) {
		dev_err(&dev->dev, "too much vector for pci frontend: %x."
			" Increase SH_INFO_MAX_VEC.\n", nvec);
		return -EINVAL;
	}

	/* Copy the requested entry numbers into the shared request.
	 * NOTE(review): assumes msi_list holds exactly nvec entries —
	 * no bound check against SH_INFO_MAX_VEC inside the loop. */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
		/* Vector is useless at this point. */
		op.msix_entries[i].vector = -1;
		i++;
	}

	err = do_pci_op(pdev, &op);

	if (likely(!err)) {
		if (likely(!op.value)) {
			/* we get the result */
			for (i = 0; i < nvec; i++)
				*(*vector+i) = op.msix_entries[i].vector;
			return 0;
		} else {
			/* Backend responded but reported a non-zero status. */
			printk(KERN_DEBUG "enable msix get value %x\n",
				op.value);
			return op.value;
		}
	} else {
		dev_err(&dev->dev, "enable msix get err %x\n", err);
		return err;
	}
}
293 | |||
294 | static void pci_frontend_disable_msix(struct pci_dev *dev) | ||
295 | { | ||
296 | int err; | ||
297 | struct xen_pci_op op = { | ||
298 | .cmd = XEN_PCI_OP_disable_msix, | ||
299 | .domain = pci_domain_nr(dev->bus), | ||
300 | .bus = dev->bus->number, | ||
301 | .devfn = dev->devfn, | ||
302 | }; | ||
303 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
304 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
305 | |||
306 | err = do_pci_op(pdev, &op); | ||
307 | |||
308 | /* What should do for error ? */ | ||
309 | if (err) | ||
310 | dev_err(&dev->dev, "pci_disable_msix get err %x\n", err); | ||
311 | } | ||
312 | |||
313 | static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector) | ||
314 | { | ||
315 | int err; | ||
316 | struct xen_pci_op op = { | ||
317 | .cmd = XEN_PCI_OP_enable_msi, | ||
318 | .domain = pci_domain_nr(dev->bus), | ||
319 | .bus = dev->bus->number, | ||
320 | .devfn = dev->devfn, | ||
321 | }; | ||
322 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
323 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
324 | |||
325 | err = do_pci_op(pdev, &op); | ||
326 | if (likely(!err)) { | ||
327 | *(*vector) = op.value; | ||
328 | } else { | ||
329 | dev_err(&dev->dev, "pci frontend enable msi failed for dev " | ||
330 | "%x:%x\n", op.bus, op.devfn); | ||
331 | err = -EINVAL; | ||
332 | } | ||
333 | return err; | ||
334 | } | ||
335 | |||
336 | static void pci_frontend_disable_msi(struct pci_dev *dev) | ||
337 | { | ||
338 | int err; | ||
339 | struct xen_pci_op op = { | ||
340 | .cmd = XEN_PCI_OP_disable_msi, | ||
341 | .domain = pci_domain_nr(dev->bus), | ||
342 | .bus = dev->bus->number, | ||
343 | .devfn = dev->devfn, | ||
344 | }; | ||
345 | struct pcifront_sd *sd = dev->bus->sysdata; | ||
346 | struct pcifront_device *pdev = pcifront_get_pdev(sd); | ||
347 | |||
348 | err = do_pci_op(pdev, &op); | ||
349 | if (err == XEN_PCI_ERR_dev_not_found) { | ||
350 | /* XXX No response from backend, what shall we do? */ | ||
351 | printk(KERN_DEBUG "get no response from backend for disable MSI\n"); | ||
352 | return; | ||
353 | } | ||
354 | if (err) | ||
355 | /* how can pciback notify us fail? */ | ||
356 | printk(KERN_DEBUG "get fake response frombackend\n"); | ||
357 | } | ||
358 | |||
/* MSI/MSI-X hooks handed to the x86 Xen PCI code via xen_pci_frontend. */
static struct xen_pci_frontend_ops pci_frontend_ops = {
	.enable_msi = pci_frontend_enable_msi,
	.disable_msi = pci_frontend_disable_msi,
	.enable_msix = pci_frontend_enable_msix,
	.disable_msix = pci_frontend_disable_msix,
};
365 | |||
366 | static void pci_frontend_registrar(int enable) | ||
367 | { | ||
368 | if (enable) | ||
369 | xen_pci_frontend = &pci_frontend_ops; | ||
370 | else | ||
371 | xen_pci_frontend = NULL; | ||
372 | }; | ||
373 | #else | ||
374 | static inline void pci_frontend_registrar(int enable) { }; | ||
375 | #endif /* CONFIG_PCI_MSI */ | ||
376 | |||
377 | /* Claim resources for the PCI frontend as-is, backend won't allow changes */ | ||
378 | static int pcifront_claim_resource(struct pci_dev *dev, void *data) | ||
379 | { | ||
380 | struct pcifront_device *pdev = data; | ||
381 | int i; | ||
382 | struct resource *r; | ||
383 | |||
384 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
385 | r = &dev->resource[i]; | ||
386 | |||
387 | if (!r->parent && r->start && r->flags) { | ||
388 | dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n", | ||
389 | pci_name(dev), i); | ||
390 | if (pci_claim_resource(dev, i)) { | ||
391 | dev_err(&pdev->xdev->dev, "Could not claim " | ||
392 | "resource %s/%d! Device offline. Try " | ||
393 | "giving less than 4GB to domain.\n", | ||
394 | pci_name(dev), i); | ||
395 | } | ||
396 | } | ||
397 | } | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
/*
 * Probe every possible devfn on bus @b and add any function the PCI core
 * does not already know about. Used both for initial scan and rescan.
 * Always returns 0.
 */
static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
				unsigned int domain, unsigned int bus,
				struct pci_bus *b)
{
	struct pci_dev *d;
	unsigned int devfn;

	/* Scan the bus for functions and add.
	 * We omit handling of PCI bridge attachment because pciback prevents
	 * bridges from being exported.
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		d = pci_get_slot(b, devfn);
		if (d) {
			/* Device is already known. */
			pci_dev_put(d);
			continue;
		}

		/* Probe the function; NULL means nothing is there. */
		d = pci_scan_single_device(b, devfn);
		if (d)
			dev_info(&pdev->xdev->dev, "New device on "
				 "%04x:%02x:%02x.%02x found.\n", domain, bus,
				 PCI_SLOT(devfn), PCI_FUNC(devfn));
	}

	return 0;
}
430 | |||
/*
 * Create and populate a new root bus @domain:@bus exported by the backend:
 * allocate per-bus bookkeeping, scan all functions, claim their resources
 * as-is, and register the devices with the core ("go live").
 *
 * Returns 0 on success or a negative errno; on failure the partially
 * allocated bus_entry/sysdata are freed.
 */
static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
				 unsigned int domain, unsigned int bus)
{
	struct pci_bus *b;
	struct pcifront_sd *sd = NULL;
	struct pci_bus_entry *bus_entry = NULL;
	int err = 0;

#ifndef CONFIG_PCI_DOMAINS
	/* Without domain support we can only represent domain 0. */
	if (domain != 0) {
		dev_err(&pdev->xdev->dev,
			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
		dev_err(&pdev->xdev->dev,
			"Please compile with CONFIG_PCI_DOMAINS\n");
		err = -EINVAL;
		goto err_out;
	}
#endif

	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
		 domain, bus);

	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
	if (!bus_entry || !sd) {
		err = -ENOMEM;
		goto err_out;
	}
	pcifront_init_sd(sd, domain, bus, pdev);

	b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
				  &pcifront_bus_ops, sd);
	if (!b) {
		dev_err(&pdev->xdev->dev,
			"Error creating PCI Frontend Bus!\n");
		err = -ENOMEM;
		goto err_out;
	}

	bus_entry->bus = b;

	/* Track the bus so pcifront_free_roots() can tear it down later. */
	list_add(&bus_entry->list, &pdev->root_buses);

	/* pci_scan_bus_parented skips devices which do not have a have
	* devfn==0. The pcifront_scan_bus enumerates all devfn. */
	err = pcifront_scan_bus(pdev, domain, bus, b);

	/* Claim resources before going "live" with our devices */
	pci_walk_bus(b, pcifront_claim_resource, pdev);

	/* Create SysFS and notify udev of the devices. Aka: "going live" */
	pci_bus_add_devices(b);

	return err;

err_out:
	/* kfree(NULL) is a no-op, so partial allocation is handled here. */
	kfree(bus_entry);
	kfree(sd);

	return err;
}
492 | |||
/*
 * Rescan an already-known root bus @domain:@bus for newly exported
 * functions; if the bus is not known yet, fall back to creating it via
 * pcifront_scan_root(). Returns 0 or a negative errno.
 */
static int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
				   unsigned int domain, unsigned int bus)
{
	int err;
	struct pci_bus *b;

#ifndef CONFIG_PCI_DOMAINS
	/* Without domain support we can only represent domain 0. */
	if (domain != 0) {
		dev_err(&pdev->xdev->dev,
			"PCI Root in non-zero PCI Domain! domain=%d\n", domain);
		dev_err(&pdev->xdev->dev,
			"Please compile with CONFIG_PCI_DOMAINS\n");
		return -EINVAL;
	}
#endif

	dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
		 domain, bus);

	b = pci_find_bus(domain, bus);
	if (!b)
		/* If the bus is unknown, create it. */
		return pcifront_scan_root(pdev, domain, bus);

	err = pcifront_scan_bus(pdev, domain, bus, b);

	/* Claim resources before going "live" with our devices */
	pci_walk_bus(b, pcifront_claim_resource, pdev);

	/* Create SysFS and notify udev of the devices. Aka: "going live" */
	pci_bus_add_devices(b);

	return err;
}
527 | |||
528 | static void free_root_bus_devs(struct pci_bus *bus) | ||
529 | { | ||
530 | struct pci_dev *dev; | ||
531 | |||
532 | while (!list_empty(&bus->devices)) { | ||
533 | dev = container_of(bus->devices.next, struct pci_dev, | ||
534 | bus_list); | ||
535 | dev_dbg(&dev->dev, "removing device\n"); | ||
536 | pci_remove_bus_device(dev); | ||
537 | } | ||
538 | } | ||
539 | |||
/*
 * Tear down every root bus created by pcifront_scan_root(): remove its
 * devices, free the sysdata we allocated, unregister the bridge device,
 * and drop the bus itself along with our bookkeeping entry.
 */
static void pcifront_free_roots(struct pcifront_device *pdev)
{
	struct pci_bus_entry *bus_entry, *t;

	dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");

	list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
		list_del(&bus_entry->list);

		/* Devices must go before the bus itself. */
		free_root_bus_devs(bus_entry->bus);

		/* sysdata is the pcifront_sd kmalloc'ed in scan_root. */
		kfree(bus_entry->bus->sysdata);

		device_unregister(bus_entry->bus->bridge);
		pci_remove_bus(bus_entry->bus);

		kfree(bus_entry);
	}
}
559 | |||
560 | static pci_ers_result_t pcifront_common_process(int cmd, | ||
561 | struct pcifront_device *pdev, | ||
562 | pci_channel_state_t state) | ||
563 | { | ||
564 | pci_ers_result_t result; | ||
565 | struct pci_driver *pdrv; | ||
566 | int bus = pdev->sh_info->aer_op.bus; | ||
567 | int devfn = pdev->sh_info->aer_op.devfn; | ||
568 | struct pci_dev *pcidev; | ||
569 | int flag = 0; | ||
570 | |||
571 | dev_dbg(&pdev->xdev->dev, | ||
572 | "pcifront AER process: cmd %x (bus:%x, devfn%x)", | ||
573 | cmd, bus, devfn); | ||
574 | result = PCI_ERS_RESULT_NONE; | ||
575 | |||
576 | pcidev = pci_get_bus_and_slot(bus, devfn); | ||
577 | if (!pcidev || !pcidev->driver) { | ||
578 | dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); | ||
579 | if (pcidev) | ||
580 | pci_dev_put(pcidev); | ||
581 | return result; | ||
582 | } | ||
583 | pdrv = pcidev->driver; | ||
584 | |||
585 | if (get_driver(&pdrv->driver)) { | ||
586 | if (pdrv->err_handler && pdrv->err_handler->error_detected) { | ||
587 | dev_dbg(&pcidev->dev, | ||
588 | "trying to call AER service\n"); | ||
589 | if (pcidev) { | ||
590 | flag = 1; | ||
591 | switch (cmd) { | ||
592 | case XEN_PCI_OP_aer_detected: | ||
593 | result = pdrv->err_handler-> | ||
594 | error_detected(pcidev, state); | ||
595 | break; | ||
596 | case XEN_PCI_OP_aer_mmio: | ||
597 | result = pdrv->err_handler-> | ||
598 | mmio_enabled(pcidev); | ||
599 | break; | ||
600 | case XEN_PCI_OP_aer_slotreset: | ||
601 | result = pdrv->err_handler-> | ||
602 | slot_reset(pcidev); | ||
603 | break; | ||
604 | case XEN_PCI_OP_aer_resume: | ||
605 | pdrv->err_handler->resume(pcidev); | ||
606 | break; | ||
607 | default: | ||
608 | dev_err(&pdev->xdev->dev, | ||
609 | "bad request in aer recovery " | ||
610 | "operation!\n"); | ||
611 | |||
612 | } | ||
613 | } | ||
614 | } | ||
615 | put_driver(&pdrv->driver); | ||
616 | } | ||
617 | if (!flag) | ||
618 | result = PCI_ERS_RESULT_NONE; | ||
619 | |||
620 | return result; | ||
621 | } | ||
622 | |||
623 | |||
/*
 * Workqueue handler for AER requests coming from the backend. Reads the
 * request out of the shared page, runs the appropriate driver error
 * handler via pcifront_common_process(), publishes the result back, and
 * notifies the backend over the event channel.
 */
static void pcifront_do_aer(struct work_struct *data)
{
	struct pcifront_device *pdev =
		container_of(data, struct pcifront_device, op_work);
	int cmd = pdev->sh_info->aer_op.cmd;
	pci_channel_state_t state =
		(pci_channel_state_t)pdev->sh_info->aer_op.err;

	/*If a pci_conf op is in progress,
		we have to wait until it is done before service aer op*/
	dev_dbg(&pdev->xdev->dev,
		"pcifront service aer bus %x devfn %x\n",
		pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn);

	pdev->sh_info->aer_op.err = pcifront_common_process(cmd, pdev, state);

	/* Post the operation to the guest. */
	/* wmb() orders the result write before clearing the active flag. */
	wmb();
	clear_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_evtchn(pdev->evtchn);

	/*in case of we lost an aer request in four lines time_window*/
	smp_mb__before_clear_bit();
	clear_bit(_PDEVB_op_active, &pdev->flags);
	smp_mb__after_clear_bit();

	/* Re-check for requests that raced with the flag clearing above. */
	schedule_pcifront_aer_op(pdev);

}
653 | |||
/* Event-channel interrupt handler: defer AER work to the workqueue. */
static irqreturn_t pcifront_handler_aer(int irq, void *dev)
{
	struct pcifront_device *pdev = dev;
	schedule_pcifront_aer_op(pdev);
	return IRQ_HANDLED;
}
660 | static int pcifront_connect(struct pcifront_device *pdev) | ||
661 | { | ||
662 | int err = 0; | ||
663 | |||
664 | spin_lock(&pcifront_dev_lock); | ||
665 | |||
666 | if (!pcifront_dev) { | ||
667 | dev_info(&pdev->xdev->dev, "Installing PCI frontend\n"); | ||
668 | pcifront_dev = pdev; | ||
669 | } else { | ||
670 | dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n"); | ||
671 | err = -EEXIST; | ||
672 | } | ||
673 | |||
674 | spin_unlock(&pcifront_dev_lock); | ||
675 | |||
676 | return err; | ||
677 | } | ||
678 | |||
679 | static void pcifront_disconnect(struct pcifront_device *pdev) | ||
680 | { | ||
681 | spin_lock(&pcifront_dev_lock); | ||
682 | |||
683 | if (pdev == pcifront_dev) { | ||
684 | dev_info(&pdev->xdev->dev, | ||
685 | "Disconnecting PCI Frontend Buses\n"); | ||
686 | pcifront_dev = NULL; | ||
687 | } | ||
688 | |||
689 | spin_unlock(&pcifront_dev_lock); | ||
690 | } | ||
/*
 * Allocate and initialise a pcifront_device for xenbus device @xdev,
 * including the page shared with the backend. Returns the new device or
 * NULL on allocation failure.
 */
static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
{
	struct pcifront_device *pdev;

	pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
	if (pdev == NULL)
		goto out;

	/* One whole page is shared with pciback for requests/responses. */
	pdev->sh_info =
		(struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
	if (pdev->sh_info == NULL) {
		kfree(pdev);
		pdev = NULL;
		goto out;
	}
	pdev->sh_info->flags = 0;

	/*Flag for registering PV AER handler*/
	set_bit(_XEN_PCIB_AERHANDLER, (void *)&pdev->sh_info->flags);

	dev_set_drvdata(&xdev->dev, pdev);
	pdev->xdev = xdev;

	INIT_LIST_HEAD(&pdev->root_buses);

	spin_lock_init(&pdev->sh_info_lock);

	/* Real values are assigned later by pcifront_publish_info(). */
	pdev->evtchn = INVALID_EVTCHN;
	pdev->gnt_ref = INVALID_GRANT_REF;
	pdev->irq = -1;

	INIT_WORK(&pdev->op_work, pcifront_do_aer);

	dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
		pdev, pdev->sh_info);
out:
	return pdev;
}
729 | |||
/*
 * Tear down everything alloc_pdev()/pcifront_publish_info() set up:
 * root buses, pending AER work, the event-channel IRQ, the grant
 * reference (or the raw page if it was never granted) and the
 * pcifront_device itself.
 */
static void free_pdev(struct pcifront_device *pdev)
{
	dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);

	pcifront_free_roots(pdev);

	/*For PCIE_AER error handling job*/
	flush_scheduled_work();

	if (pdev->irq >= 0)
		unbind_from_irqhandler(pdev->irq, pdev);

	if (pdev->evtchn != INVALID_EVTCHN)
		xenbus_free_evtchn(pdev->xdev, pdev->evtchn);

	/* If the page was granted to the backend, end the grant (this also
	 * releases the page); otherwise free it directly. */
	if (pdev->gnt_ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */,
					  (unsigned long)pdev->sh_info);
	else
		free_page((unsigned long)pdev->sh_info);

	dev_set_drvdata(&pdev->xdev->dev, NULL);

	kfree(pdev);
}
755 | |||
/*
 * Publish our communication parameters to the backend via xenstore:
 * grant the shared page, allocate and bind the event channel, then write
 * pci-op-ref / event-channel / magic in a (retried) xenbus transaction and
 * switch to XenbusStateInitialised. Returns 0 or a negative errno.
 */
static int pcifront_publish_info(struct pcifront_device *pdev)
{
	int err = 0;
	struct xenbus_transaction trans;

	err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
	if (err < 0)
		goto out;

	/* xenbus_grant_ring() returns the grant reference on success. */
	pdev->gnt_ref = err;

	err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
	if (err)
		goto out;

	err = bind_evtchn_to_irqhandler(pdev->evtchn, pcifront_handler_aer,
		0, "pcifront", pdev);

	if (err < 0)
		return err;

	pdev->irq = err;

do_publish:
	err = xenbus_transaction_start(&trans);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error writing configuration for backend "
				 "(start transaction)");
		goto out;
	}

	err = xenbus_printf(trans, pdev->xdev->nodename,
			    "pci-op-ref", "%u", pdev->gnt_ref);
	if (!err)
		err = xenbus_printf(trans, pdev->xdev->nodename,
				    "event-channel", "%u", pdev->evtchn);
	if (!err)
		err = xenbus_printf(trans, pdev->xdev->nodename,
				    "magic", XEN_PCI_MAGIC);

	if (err) {
		/* Abort the transaction on any write failure. */
		xenbus_transaction_end(trans, 1);
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error writing configuration for backend");
		goto out;
	} else {
		err = xenbus_transaction_end(trans, 0);
		/* -EAGAIN means the transaction raced; retry from scratch. */
		if (err == -EAGAIN)
			goto do_publish;
		else if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error completing transaction "
					 "for backend");
			goto out;
		}
	}

	xenbus_switch_state(pdev->xdev, XenbusStateInitialised);

	dev_dbg(&pdev->xdev->dev, "publishing successful!\n");

out:
	return err;
}
821 | |||
/*
 * Complete the connection once the backend reports Connected: install this
 * pdev as the global frontend, read the list of exported PCI roots from
 * xenstore ("root_num" and "root-%d" = "domain:bus"), scan each root, and
 * switch our state to Connected. Returns 0 or a negative errno.
 */
static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	char str[64];
	unsigned int domain, bus;


	/* Only connect once */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateInitialised)
		goto out;

	err = pcifront_connect(pdev);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error connecting PCI Frontend");
		goto out;
	}

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* Older backends may not publish root_num at all. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_scan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		/* xenbus_scanf returns the number of values parsed. */
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_scan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	return err;
}
888 | |||
889 | static int pcifront_try_disconnect(struct pcifront_device *pdev) | ||
890 | { | ||
891 | int err = 0; | ||
892 | enum xenbus_state prev_state; | ||
893 | |||
894 | |||
895 | prev_state = xenbus_read_driver_state(pdev->xdev->nodename); | ||
896 | |||
897 | if (prev_state >= XenbusStateClosing) | ||
898 | goto out; | ||
899 | |||
900 | if (prev_state == XenbusStateConnected) { | ||
901 | pcifront_free_roots(pdev); | ||
902 | pcifront_disconnect(pdev); | ||
903 | } | ||
904 | |||
905 | err = xenbus_switch_state(pdev->xdev, XenbusStateClosed); | ||
906 | |||
907 | out: | ||
908 | |||
909 | return err; | ||
910 | } | ||
911 | |||
/*
 * Handle the backend's Reconfigured transition after hotplug: re-read the
 * exported roots from xenstore and rescan each of them for new devices,
 * then return to the Connected state. Returns 0 or a negative errno.
 */
static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
{
	int err = -EFAULT;
	int i, num_roots, len;
	unsigned int domain, bus;
	char str[64];

	/* Only act while the handshake is in the Reconfiguring phase. */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateReconfiguring)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
			   "root_num", "%d", &num_roots);
	if (err == -ENOENT) {
		/* Older backends may not publish root_num at all. */
		xenbus_dev_error(pdev->xdev, err,
				 "No PCI Roots found, trying 0000:00");
		err = pcifront_rescan_root(pdev, 0, 0);
		num_roots = 0;
	} else if (err != 1) {
		/* xenbus_scanf returns the number of values parsed. */
		if (err == 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI roots");
		goto out;
	}

	for (i = 0; i < num_roots; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x", &domain, &bus);
		if (err != 2) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI root %d", i);
			goto out;
		}

		err = pcifront_rescan_root(pdev, domain, bus);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error scanning PCI root %04x:%02x",
					 domain, bus);
			goto out;
		}
	}

	xenbus_switch_state(pdev->xdev, XenbusStateConnected);

out:
	return err;
}
969 | |||
/*
 * Handle a backend-initiated device unplug: walk the "num_devs" entries in
 * xenstore, and for every device whose "state-%d" node is Closing, look up
 * its "vdev-%d" address (domain:bus:slot.func) and remove it from the PCI
 * core. Finally acknowledge by switching to Reconfiguring.
 * Returns 0 or a negative errno.
 */
static int pcifront_detach_devices(struct pcifront_device *pdev)
{
	int err = 0;
	int i, num_devs;
	unsigned int domain, bus, slot, func;
	struct pci_bus *pci_bus;
	struct pci_dev *pci_dev;
	char str[64];

	/* Unplug requests are only serviced in the Connected state. */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateConnected)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
			   &num_devs);
	if (err != 1) {
		if (err >= 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of PCI devices");
		goto out;
	}

	/* Find devices being detached and remove them. */
	for (i = 0; i < num_devs; i++) {
		int l, state;
		l = snprintf(str, sizeof(str), "state-%d", i);
		if (unlikely(l >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}
		/* Missing/garbled state node: treat as Unknown and skip. */
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
				   &state);
		if (err != 1)
			state = XenbusStateUnknown;

		if (state != XenbusStateClosing)
			continue;

		/* Remove device. */
		l = snprintf(str, sizeof(str), "vdev-%d", i);
		if (unlikely(l >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}
		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
				   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
		if (err != 4) {
			if (err >= 0)
				err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading PCI device %d", i);
			goto out;
		}

		pci_bus = pci_find_bus(domain, bus);
		if (!pci_bus) {
			dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
				domain, bus);
			continue;
		}
		pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
		if (!pci_dev) {
			dev_dbg(&pdev->xdev->dev,
				"Cannot get PCI device %04x:%02x:%02x.%02x\n",
				domain, bus, slot, func);
			continue;
		}
		pci_remove_bus_device(pci_dev);
		/* Drop the reference taken by pci_get_slot(). */
		pci_dev_put(pci_dev);

		dev_dbg(&pdev->xdev->dev,
			"PCI device %04x:%02x:%02x.%02x removed.\n",
			domain, bus, slot, func);
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);

out:
	return err;
}
1051 | |||
/*
 * xenbus otherend_changed callback: react to backend state transitions.
 * Connected → finish our connection; Closing → disconnect;
 * Reconfiguring/Reconfigured → handle hot-unplug/hot-plug of devices.
 */
static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
						  enum xenbus_state be_state)
{
	struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);

	switch (be_state) {
	/* Transitional/terminal states that need no action from us. */
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		pcifront_try_connect(pdev);
		break;

	case XenbusStateClosing:
		dev_warn(&xdev->dev, "backend going away!\n");
		pcifront_try_disconnect(pdev);
		break;

	case XenbusStateReconfiguring:
		pcifront_detach_devices(pdev);
		break;

	case XenbusStateReconfigured:
		pcifront_attach_devices(pdev);
		break;
	}
}
1083 | |||
1084 | static int pcifront_xenbus_probe(struct xenbus_device *xdev, | ||
1085 | const struct xenbus_device_id *id) | ||
1086 | { | ||
1087 | int err = 0; | ||
1088 | struct pcifront_device *pdev = alloc_pdev(xdev); | ||
1089 | |||
1090 | if (pdev == NULL) { | ||
1091 | err = -ENOMEM; | ||
1092 | xenbus_dev_fatal(xdev, err, | ||
1093 | "Error allocating pcifront_device struct"); | ||
1094 | goto out; | ||
1095 | } | ||
1096 | |||
1097 | err = pcifront_publish_info(pdev); | ||
1098 | if (err) | ||
1099 | free_pdev(pdev); | ||
1100 | |||
1101 | out: | ||
1102 | return err; | ||
1103 | } | ||
1104 | |||
/* xenbus remove: free the frontend state if probe ever created it. */
static int pcifront_xenbus_remove(struct xenbus_device *xdev)
{
	struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
	if (pdev)
		free_pdev(pdev);

	return 0;
}
1113 | |||
/* xenbus device IDs this driver binds to; empty string terminates. */
static const struct xenbus_device_id xenpci_ids[] = {
	{"pci"},
	{""},
};
1118 | |||
/* xenbus frontend driver registration record. */
static struct xenbus_driver xenbus_pcifront_driver = {
	.name			= "pcifront",
	.owner			= THIS_MODULE,
	.ids			= xenpci_ids,
	.probe			= pcifront_xenbus_probe,
	.remove			= pcifront_xenbus_remove,
	.otherend_changed	= pcifront_backend_changed,
};
1127 | |||
/*
 * Module init: only a Xen PV guest (not dom0) can be a PCI frontend.
 * Registers the MSI ops and the xenbus frontend driver.
 */
static int __init pcifront_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	pci_frontend_registrar(1 /* enable */);

	return xenbus_register_frontend(&xenbus_pcifront_driver);
}
1137 | |||
/* Module exit: undo pcifront_init() in reverse order. */
static void __exit pcifront_cleanup(void)
{
	xenbus_unregister_driver(&xenbus_pcifront_driver);
	pci_frontend_registrar(0 /* disable */);
}
/* Module entry points and metadata. */
module_init(pcifront_init);
module_exit(pcifront_cleanup);

MODULE_DESCRIPTION("Xen PCI passthrough frontend.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:pci");