author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-16 10:49:54 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-16 10:49:54 -0400
commit | 4406c56d0a4da7a37b9180abeaece6cd00bcc874 (patch) |
tree | 65a85fa73a25d24cbed6d163fdcf8df1b934a0be /drivers/pci |
parent | 6b7b352f2102e21f9d8f38e932f01d9c5705c073 (diff) |
parent | 5e3573db2bd5db6925159279d99576a4635bdb66 (diff) |
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (75 commits)
PCI hotplug: clean up acpi_run_hpp()
PCI hotplug: acpiphp: use generic pci_configure_slot()
PCI hotplug: shpchp: use generic pci_configure_slot()
PCI hotplug: pciehp: use generic pci_configure_slot()
PCI hotplug: add pci_configure_slot()
PCI hotplug: clean up acpi_get_hp_params_from_firmware() interface
PCI hotplug: acpiphp: don't cache hotplug_params in acpiphp_bridge
PCI hotplug: acpiphp: remove superfluous _HPP/_HPX evaluation
PCI: Clear saved_state after the state has been restored
PCI PM: Return error codes from pci_pm_resume()
PCI: use dev_printk in quirk messages
PCI / PCIe portdrv: Fix pcie_portdrv_slot_reset()
PCI Hotplug: convert acpi_pci_detect_ejectable() to take an acpi_handle
PCI Hotplug: acpiphp: find bridges the easy way
PCI: pcie portdrv: remove unused variable
PCI / ACPI PM: Propagate wake-up enable for devices w/o ACPI support
ACPI PM: Replace wakeup.prepared with reference counter
PCI PM: Introduce device flag wakeup_prepared
PCI / ACPI PM: Rework some debug messages
PCI PM: Simplify PCI wake-up code
...
Fixed up conflict in arch/powerpc/kernel/pci_64.c due to OF device tree
scanning having been moved and merged for the 32- and 64-bit cases. The
'needs_freset' initialization added in 6e19314cc ("PCI/powerpc: support
PCIe fundamental reset") is now in arch/powerpc/kernel/pci_of_scan.c.
Diffstat (limited to 'drivers/pci')
35 files changed, 1221 insertions, 1191 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1ebd6b4c743b..4a7f11d8f432 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -8,6 +8,9 @@ obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ | |||
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
11 | obj-$(CONFIG_PCI_LEGACY) += legacy.o | ||
12 | CFLAGS_legacy.o += -Wno-deprecated-declarations | ||
13 | |||
11 | # Build PCI Express stuff if needed | 14 | # Build PCI Express stuff if needed |
12 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
13 | 16 | ||
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 2aa117c8cd87..3625b094bf7e 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o | |||
22 | # Link this last so it doesn't claim devices that have a real hotplug driver | 22 | # Link this last so it doesn't claim devices that have a real hotplug driver |
23 | obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o | 23 | obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o |
24 | 24 | ||
25 | pci_hotplug-objs := pci_hotplug_core.o | 25 | pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o |
26 | 26 | ||
27 | ifdef CONFIG_HOTPLUG_PCI_CPCI | 27 | ifdef CONFIG_HOTPLUG_PCI_CPCI |
28 | pci_hotplug-objs += cpci_hotplug_core.o \ | 28 | pci_hotplug-objs += cpci_hotplug_core.o \ |
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index eb159587d0bf..a73028ec52e5 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -41,7 +41,6 @@ | |||
41 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) | 41 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) |
42 | 42 | ||
43 | #define METHOD_NAME__SUN "_SUN" | 43 | #define METHOD_NAME__SUN "_SUN" |
44 | #define METHOD_NAME__HPP "_HPP" | ||
45 | #define METHOD_NAME_OSHP "OSHP" | 44 | #define METHOD_NAME_OSHP "OSHP" |
46 | 45 | ||
47 | static int debug_acpi; | 46 | static int debug_acpi; |
@@ -215,80 +214,41 @@ acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) | |||
215 | static acpi_status | 214 | static acpi_status |
216 | acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) | 215 | acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) |
217 | { | 216 | { |
218 | acpi_status status; | 217 | acpi_status status; |
219 | u8 nui[4]; | 218 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
220 | struct acpi_buffer ret_buf = { 0, NULL}; | 219 | union acpi_object *package, *fields; |
221 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 220 | int i; |
222 | union acpi_object *ext_obj, *package; | ||
223 | int i, len = 0; | ||
224 | |||
225 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | ||
226 | 221 | ||
227 | /* Clear the return buffer with zeros */ | ||
228 | memset(hpp, 0, sizeof(struct hotplug_params)); | 222 | memset(hpp, 0, sizeof(struct hotplug_params)); |
229 | 223 | ||
230 | /* get _hpp */ | 224 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); |
231 | status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf); | 225 | if (ACPI_FAILURE(status)) |
232 | switch (status) { | 226 | return status; |
233 | case AE_BUFFER_OVERFLOW: | ||
234 | ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL); | ||
235 | if (!ret_buf.pointer) { | ||
236 | printk(KERN_ERR "%s:%s alloc for _HPP fail\n", | ||
237 | __func__, (char *)string.pointer); | ||
238 | kfree(string.pointer); | ||
239 | return AE_NO_MEMORY; | ||
240 | } | ||
241 | status = acpi_evaluate_object(handle, METHOD_NAME__HPP, | ||
242 | NULL, &ret_buf); | ||
243 | if (ACPI_SUCCESS(status)) | ||
244 | break; | ||
245 | default: | ||
246 | if (ACPI_FAILURE(status)) { | ||
247 | pr_debug("%s:%s _HPP fail=0x%x\n", __func__, | ||
248 | (char *)string.pointer, status); | ||
249 | kfree(string.pointer); | ||
250 | return status; | ||
251 | } | ||
252 | } | ||
253 | 227 | ||
254 | ext_obj = (union acpi_object *) ret_buf.pointer; | 228 | package = (union acpi_object *) buffer.pointer; |
255 | if (ext_obj->type != ACPI_TYPE_PACKAGE) { | 229 | if (package->type != ACPI_TYPE_PACKAGE || |
256 | printk(KERN_ERR "%s:%s _HPP obj not a package\n", __func__, | 230 | package->package.count != 4) { |
257 | (char *)string.pointer); | ||
258 | status = AE_ERROR; | 231 | status = AE_ERROR; |
259 | goto free_and_return; | 232 | goto exit; |
260 | } | 233 | } |
261 | 234 | ||
262 | len = ext_obj->package.count; | 235 | fields = package->package.elements; |
263 | package = (union acpi_object *) ret_buf.pointer; | 236 | for (i = 0; i < 4; i++) { |
264 | for ( i = 0; (i < len) || (i < 4); i++) { | 237 | if (fields[i].type != ACPI_TYPE_INTEGER) { |
265 | ext_obj = (union acpi_object *) &package->package.elements[i]; | ||
266 | switch (ext_obj->type) { | ||
267 | case ACPI_TYPE_INTEGER: | ||
268 | nui[i] = (u8)ext_obj->integer.value; | ||
269 | break; | ||
270 | default: | ||
271 | printk(KERN_ERR "%s:%s _HPP obj type incorrect\n", | ||
272 | __func__, (char *)string.pointer); | ||
273 | status = AE_ERROR; | 238 | status = AE_ERROR; |
274 | goto free_and_return; | 239 | goto exit; |
275 | } | 240 | } |
276 | } | 241 | } |
277 | 242 | ||
278 | hpp->t0 = &hpp->type0_data; | 243 | hpp->t0 = &hpp->type0_data; |
279 | hpp->t0->cache_line_size = nui[0]; | 244 | hpp->t0->revision = 1; |
280 | hpp->t0->latency_timer = nui[1]; | 245 | hpp->t0->cache_line_size = fields[0].integer.value; |
281 | hpp->t0->enable_serr = nui[2]; | 246 | hpp->t0->latency_timer = fields[1].integer.value; |
282 | hpp->t0->enable_perr = nui[3]; | 247 | hpp->t0->enable_serr = fields[2].integer.value; |
283 | 248 | hpp->t0->enable_perr = fields[3].integer.value; | |
284 | pr_debug(" _HPP: cache_line_size=0x%x\n", hpp->t0->cache_line_size); | ||
285 | pr_debug(" _HPP: latency timer =0x%x\n", hpp->t0->latency_timer); | ||
286 | pr_debug(" _HPP: enable SERR =0x%x\n", hpp->t0->enable_serr); | ||
287 | pr_debug(" _HPP: enable PERR =0x%x\n", hpp->t0->enable_perr); | ||
288 | 249 | ||
289 | free_and_return: | 250 | exit: |
290 | kfree(string.pointer); | 251 | kfree(buffer.pointer); |
291 | kfree(ret_buf.pointer); | ||
292 | return status; | 252 | return status; |
293 | } | 253 | } |
294 | 254 | ||
@@ -322,20 +282,19 @@ static acpi_status acpi_run_oshp(acpi_handle handle) | |||
322 | return status; | 282 | return status; |
323 | } | 283 | } |
324 | 284 | ||
325 | /* acpi_get_hp_params_from_firmware | 285 | /* pci_get_hp_params |
326 | * | 286 | * |
327 | * @bus - the pci_bus of the bus on which the device is newly added | 287 | * @dev - the pci_dev for which we want parameters |
328 | * @hpp - allocated by the caller | 288 | * @hpp - allocated by the caller |
329 | */ | 289 | */ |
330 | acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | 290 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) |
331 | struct hotplug_params *hpp) | ||
332 | { | 291 | { |
333 | acpi_status status = AE_NOT_FOUND; | 292 | acpi_status status; |
334 | acpi_handle handle, phandle; | 293 | acpi_handle handle, phandle; |
335 | struct pci_bus *pbus; | 294 | struct pci_bus *pbus; |
336 | 295 | ||
337 | handle = NULL; | 296 | handle = NULL; |
338 | for (pbus = bus; pbus; pbus = pbus->parent) { | 297 | for (pbus = dev->bus; pbus; pbus = pbus->parent) { |
339 | handle = acpi_pci_get_bridge_handle(pbus); | 298 | handle = acpi_pci_get_bridge_handle(pbus); |
340 | if (handle) | 299 | if (handle) |
341 | break; | 300 | break; |
@@ -345,15 +304,15 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | |||
345 | * _HPP settings apply to all child buses, until another _HPP is | 304 | * _HPP settings apply to all child buses, until another _HPP is |
346 | * encountered. If we don't find an _HPP for the input pci dev, | 305 | * encountered. If we don't find an _HPP for the input pci dev, |
347 | * look for it in the parent device scope since that would apply to | 306 | * look for it in the parent device scope since that would apply to |
348 | * this pci dev. If we don't find any _HPP, use hardcoded defaults | 307 | * this pci dev. |
349 | */ | 308 | */ |
350 | while (handle) { | 309 | while (handle) { |
351 | status = acpi_run_hpx(handle, hpp); | 310 | status = acpi_run_hpx(handle, hpp); |
352 | if (ACPI_SUCCESS(status)) | 311 | if (ACPI_SUCCESS(status)) |
353 | break; | 312 | return 0; |
354 | status = acpi_run_hpp(handle, hpp); | 313 | status = acpi_run_hpp(handle, hpp); |
355 | if (ACPI_SUCCESS(status)) | 314 | if (ACPI_SUCCESS(status)) |
356 | break; | 315 | return 0; |
357 | if (acpi_is_root_bridge(handle)) | 316 | if (acpi_is_root_bridge(handle)) |
358 | break; | 317 | break; |
359 | status = acpi_get_parent(handle, &phandle); | 318 | status = acpi_get_parent(handle, &phandle); |
@@ -361,9 +320,9 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | |||
361 | break; | 320 | break; |
362 | handle = phandle; | 321 | handle = phandle; |
363 | } | 322 | } |
364 | return status; | 323 | return -ENODEV; |
365 | } | 324 | } |
366 | EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); | 325 | EXPORT_SYMBOL_GPL(pci_get_hp_params); |
367 | 326 | ||
368 | /** | 327 | /** |
369 | * acpi_get_hp_hw_control_from_firmware | 328 | * acpi_get_hp_hw_control_from_firmware |
@@ -500,18 +459,18 @@ check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
500 | 459 | ||
501 | /** | 460 | /** |
502 | * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots | 461 | * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots |
503 | * @pbus - PCI bus to scan | 462 | * @handle - handle of the PCI bus to scan |
504 | * | 463 | * |
505 | * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. | 464 | * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. |
506 | */ | 465 | */ |
507 | int acpi_pci_detect_ejectable(struct pci_bus *pbus) | 466 | int acpi_pci_detect_ejectable(acpi_handle handle) |
508 | { | 467 | { |
509 | acpi_handle handle; | ||
510 | int found = 0; | 468 | int found = 0; |
511 | 469 | ||
512 | if (!(handle = acpi_pci_get_bridge_handle(pbus))) | 470 | if (!handle) |
513 | return 0; | 471 | return found; |
514 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 472 | |
473 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
515 | check_hotplug, (void *)&found, NULL); | 474 | check_hotplug, (void *)&found, NULL); |
516 | return found; | 475 | return found; |
517 | } | 476 | } |
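A minimal caller-side sketch of the two interface changes above, assuming a hypothetical check_slot() helper: pci_get_hp_params() now takes the pci_dev itself and returns 0 or -ENODEV instead of an acpi_status, and acpi_pci_detect_ejectable() takes an acpi_handle rather than a pci_bus (the pciehp hunks later in this diff obtain that handle via DEVICE_ACPI_HANDLE()).

```c
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/pci-acpi.h>

/*
 * Illustrative caller, not part of this patch: fetch the firmware
 * hotplug parameters for a bridge and check whether the bus below it
 * has ACPI-ejectable slots.
 */
static int check_slot(struct pci_dev *bridge)
{
	struct hotplug_params hpp;
	acpi_handle handle = DEVICE_ACPI_HANDLE(&bridge->dev);

	if (pci_get_hp_params(bridge, &hpp))
		dev_info(&bridge->dev, "no _HPP/_HPX settings from firmware\n");

	/* non-zero if the bus below this bridge has ejectable slots */
	return handle ? acpi_pci_detect_ejectable(handle) : 0;
}
```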
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e68d5f20ffb3..7d938df79206 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -91,9 +91,6 @@ struct acpiphp_bridge { | |||
91 | /* PCI-to-PCI bridge device */ | 91 | /* PCI-to-PCI bridge device */ |
92 | struct pci_dev *pci_dev; | 92 | struct pci_dev *pci_dev; |
93 | 93 | ||
94 | /* ACPI 2.0 _HPP parameters */ | ||
95 | struct hotplug_params hpp; | ||
96 | |||
97 | spinlock_t res_lock; | 94 | spinlock_t res_lock; |
98 | }; | 95 | }; |
99 | 96 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0cb0f830a993..58d25a163a8b 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(ioapic_list_lock); | |||
59 | 59 | ||
60 | static void handle_hotplug_event_bridge (acpi_handle, u32, void *); | 60 | static void handle_hotplug_event_bridge (acpi_handle, u32, void *); |
61 | static void acpiphp_sanitize_bus(struct pci_bus *bus); | 61 | static void acpiphp_sanitize_bus(struct pci_bus *bus); |
62 | static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus); | 62 | static void acpiphp_set_hpp_values(struct pci_bus *bus); |
63 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); | 63 | static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); |
64 | 64 | ||
65 | /* callback routine to check for the existence of a pci dock device */ | 65 | /* callback routine to check for the existence of a pci dock device */ |
@@ -261,51 +261,21 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
261 | 261 | ||
262 | 262 | ||
263 | /* see if it's worth looking at this bridge */ | 263 | /* see if it's worth looking at this bridge */ |
264 | static int detect_ejectable_slots(struct pci_bus *pbus) | 264 | static int detect_ejectable_slots(acpi_handle handle) |
265 | { | 265 | { |
266 | int found = acpi_pci_detect_ejectable(pbus); | 266 | int found = acpi_pci_detect_ejectable(handle); |
267 | if (!found) { | 267 | if (!found) { |
268 | acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus); | 268 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
269 | if (!bridge_handle) | ||
270 | return 0; | ||
271 | acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, | ||
272 | is_pci_dock_device, (void *)&found, NULL); | 269 | is_pci_dock_device, (void *)&found, NULL); |
273 | } | 270 | } |
274 | return found; | 271 | return found; |
275 | } | 272 | } |
276 | 273 | ||
277 | |||
278 | /* decode ACPI 2.0 _HPP hot plug parameters */ | ||
279 | static void decode_hpp(struct acpiphp_bridge *bridge) | ||
280 | { | ||
281 | acpi_status status; | ||
282 | |||
283 | status = acpi_get_hp_params_from_firmware(bridge->pci_bus, &bridge->hpp); | ||
284 | if (ACPI_FAILURE(status) || | ||
285 | !bridge->hpp.t0 || (bridge->hpp.t0->revision > 1)) { | ||
286 | /* use default numbers */ | ||
287 | printk(KERN_WARNING | ||
288 | "%s: Could not get hotplug parameters. Use defaults\n", | ||
289 | __func__); | ||
290 | bridge->hpp.t0 = &bridge->hpp.type0_data; | ||
291 | bridge->hpp.t0->revision = 0; | ||
292 | bridge->hpp.t0->cache_line_size = 0x10; | ||
293 | bridge->hpp.t0->latency_timer = 0x40; | ||
294 | bridge->hpp.t0->enable_serr = 0; | ||
295 | bridge->hpp.t0->enable_perr = 0; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | |||
300 | |||
301 | /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ | 274 | /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ |
302 | static void init_bridge_misc(struct acpiphp_bridge *bridge) | 275 | static void init_bridge_misc(struct acpiphp_bridge *bridge) |
303 | { | 276 | { |
304 | acpi_status status; | 277 | acpi_status status; |
305 | 278 | ||
306 | /* decode ACPI 2.0 _HPP (hot plug parameters) */ | ||
307 | decode_hpp(bridge); | ||
308 | |||
309 | /* must be added to the list prior to calling register_slot */ | 279 | /* must be added to the list prior to calling register_slot */ |
310 | list_add(&bridge->list, &bridge_list); | 280 | list_add(&bridge->list, &bridge_list); |
311 | 281 | ||
@@ -399,9 +369,10 @@ static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge) | |||
399 | 369 | ||
400 | 370 | ||
401 | /* allocate and initialize host bridge data structure */ | 371 | /* allocate and initialize host bridge data structure */ |
402 | static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) | 372 | static void add_host_bridge(acpi_handle *handle) |
403 | { | 373 | { |
404 | struct acpiphp_bridge *bridge; | 374 | struct acpiphp_bridge *bridge; |
375 | struct acpi_pci_root *root = acpi_pci_find_root(handle); | ||
405 | 376 | ||
406 | bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); | 377 | bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); |
407 | if (bridge == NULL) | 378 | if (bridge == NULL) |
@@ -410,7 +381,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) | |||
410 | bridge->type = BRIDGE_TYPE_HOST; | 381 | bridge->type = BRIDGE_TYPE_HOST; |
411 | bridge->handle = handle; | 382 | bridge->handle = handle; |
412 | 383 | ||
413 | bridge->pci_bus = pci_bus; | 384 | bridge->pci_bus = root->bus; |
414 | 385 | ||
415 | spin_lock_init(&bridge->res_lock); | 386 | spin_lock_init(&bridge->res_lock); |
416 | 387 | ||
@@ -419,7 +390,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) | |||
419 | 390 | ||
420 | 391 | ||
421 | /* allocate and initialize PCI-to-PCI bridge data structure */ | 392 | /* allocate and initialize PCI-to-PCI bridge data structure */ |
422 | static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) | 393 | static void add_p2p_bridge(acpi_handle *handle) |
423 | { | 394 | { |
424 | struct acpiphp_bridge *bridge; | 395 | struct acpiphp_bridge *bridge; |
425 | 396 | ||
@@ -433,8 +404,8 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) | |||
433 | bridge->handle = handle; | 404 | bridge->handle = handle; |
434 | config_p2p_bridge_flags(bridge); | 405 | config_p2p_bridge_flags(bridge); |
435 | 406 | ||
436 | bridge->pci_dev = pci_dev_get(pci_dev); | 407 | bridge->pci_dev = acpi_get_pci_dev(handle); |
437 | bridge->pci_bus = pci_dev->subordinate; | 408 | bridge->pci_bus = bridge->pci_dev->subordinate; |
438 | if (!bridge->pci_bus) { | 409 | if (!bridge->pci_bus) { |
439 | err("This is not a PCI-to-PCI bridge!\n"); | 410 | err("This is not a PCI-to-PCI bridge!\n"); |
440 | goto err; | 411 | goto err; |
@@ -451,7 +422,7 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) | |||
451 | init_bridge_misc(bridge); | 422 | init_bridge_misc(bridge); |
452 | return; | 423 | return; |
453 | err: | 424 | err: |
454 | pci_dev_put(pci_dev); | 425 | pci_dev_put(bridge->pci_dev); |
455 | kfree(bridge); | 426 | kfree(bridge); |
456 | return; | 427 | return; |
457 | } | 428 | } |
@@ -462,39 +433,21 @@ static acpi_status | |||
462 | find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) | 433 | find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) |
463 | { | 434 | { |
464 | acpi_status status; | 435 | acpi_status status; |
465 | acpi_handle dummy_handle; | ||
466 | unsigned long long tmp; | ||
467 | int device, function; | ||
468 | struct pci_dev *dev; | 436 | struct pci_dev *dev; |
469 | struct pci_bus *pci_bus = context; | ||
470 | |||
471 | status = acpi_get_handle(handle, "_ADR", &dummy_handle); | ||
472 | if (ACPI_FAILURE(status)) | ||
473 | return AE_OK; /* continue */ | ||
474 | |||
475 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &tmp); | ||
476 | if (ACPI_FAILURE(status)) { | ||
477 | dbg("%s: _ADR evaluation failure\n", __func__); | ||
478 | return AE_OK; | ||
479 | } | ||
480 | |||
481 | device = (tmp >> 16) & 0xffff; | ||
482 | function = tmp & 0xffff; | ||
483 | |||
484 | dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function)); | ||
485 | 437 | ||
438 | dev = acpi_get_pci_dev(handle); | ||
486 | if (!dev || !dev->subordinate) | 439 | if (!dev || !dev->subordinate) |
487 | goto out; | 440 | goto out; |
488 | 441 | ||
489 | /* check if this bridge has ejectable slots */ | 442 | /* check if this bridge has ejectable slots */ |
490 | if ((detect_ejectable_slots(dev->subordinate) > 0)) { | 443 | if ((detect_ejectable_slots(handle) > 0)) { |
491 | dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); | 444 | dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); |
492 | add_p2p_bridge(handle, dev); | 445 | add_p2p_bridge(handle); |
493 | } | 446 | } |
494 | 447 | ||
495 | /* search P2P bridges under this p2p bridge */ | 448 | /* search P2P bridges under this p2p bridge */ |
496 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 449 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
497 | find_p2p_bridge, dev->subordinate, NULL); | 450 | find_p2p_bridge, NULL, NULL); |
498 | if (ACPI_FAILURE(status)) | 451 | if (ACPI_FAILURE(status)) |
499 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); | 452 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); |
500 | 453 | ||
@@ -509,9 +462,7 @@ static int add_bridge(acpi_handle handle) | |||
509 | { | 462 | { |
510 | acpi_status status; | 463 | acpi_status status; |
511 | unsigned long long tmp; | 464 | unsigned long long tmp; |
512 | int seg, bus; | ||
513 | acpi_handle dummy_handle; | 465 | acpi_handle dummy_handle; |
514 | struct pci_bus *pci_bus; | ||
515 | 466 | ||
516 | /* if the bridge doesn't have _STA, we assume it is always there */ | 467 | /* if the bridge doesn't have _STA, we assume it is always there */ |
517 | status = acpi_get_handle(handle, "_STA", &dummy_handle); | 468 | status = acpi_get_handle(handle, "_STA", &dummy_handle); |
@@ -526,36 +477,15 @@ static int add_bridge(acpi_handle handle) | |||
526 | return 0; | 477 | return 0; |
527 | } | 478 | } |
528 | 479 | ||
529 | /* get PCI segment number */ | ||
530 | status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp); | ||
531 | |||
532 | seg = ACPI_SUCCESS(status) ? tmp : 0; | ||
533 | |||
534 | /* get PCI bus number */ | ||
535 | status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp); | ||
536 | |||
537 | if (ACPI_SUCCESS(status)) { | ||
538 | bus = tmp; | ||
539 | } else { | ||
540 | warn("can't get bus number, assuming 0\n"); | ||
541 | bus = 0; | ||
542 | } | ||
543 | |||
544 | pci_bus = pci_find_bus(seg, bus); | ||
545 | if (!pci_bus) { | ||
546 | err("Can't find bus %04x:%02x\n", seg, bus); | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | /* check if this bridge has ejectable slots */ | 480 | /* check if this bridge has ejectable slots */ |
551 | if (detect_ejectable_slots(pci_bus) > 0) { | 481 | if (detect_ejectable_slots(handle) > 0) { |
552 | dbg("found PCI host-bus bridge with hot-pluggable slots\n"); | 482 | dbg("found PCI host-bus bridge with hot-pluggable slots\n"); |
553 | add_host_bridge(handle, pci_bus); | 483 | add_host_bridge(handle); |
554 | } | 484 | } |
555 | 485 | ||
556 | /* search P2P bridges under this host bridge */ | 486 | /* search P2P bridges under this host bridge */ |
557 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 487 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
558 | find_p2p_bridge, pci_bus, NULL); | 488 | find_p2p_bridge, NULL, NULL); |
559 | 489 | ||
560 | if (ACPI_FAILURE(status)) | 490 | if (ACPI_FAILURE(status)) |
561 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); | 491 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); |
@@ -1083,7 +1013,7 @@ static int __ref enable_device(struct acpiphp_slot *slot) | |||
1083 | 1013 | ||
1084 | pci_bus_assign_resources(bus); | 1014 | pci_bus_assign_resources(bus); |
1085 | acpiphp_sanitize_bus(bus); | 1015 | acpiphp_sanitize_bus(bus); |
1086 | acpiphp_set_hpp_values(slot->bridge->handle, bus); | 1016 | acpiphp_set_hpp_values(bus); |
1087 | list_for_each_entry(func, &slot->funcs, sibling) | 1017 | list_for_each_entry(func, &slot->funcs, sibling) |
1088 | acpiphp_configure_ioapics(func->handle); | 1018 | acpiphp_configure_ioapics(func->handle); |
1089 | pci_enable_bridges(bus); | 1019 | pci_enable_bridges(bus); |
@@ -1294,70 +1224,12 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
1294 | return retval; | 1224 | return retval; |
1295 | } | 1225 | } |
1296 | 1226 | ||
1297 | static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge) | 1227 | static void acpiphp_set_hpp_values(struct pci_bus *bus) |
1298 | { | 1228 | { |
1299 | u16 pci_cmd, pci_bctl; | ||
1300 | struct pci_dev *cdev; | ||
1301 | |||
1302 | /* Program hpp values for this device */ | ||
1303 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
1304 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
1305 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
1306 | return; | ||
1307 | |||
1308 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) | ||
1309 | return; | ||
1310 | |||
1311 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, | ||
1312 | bridge->hpp.t0->cache_line_size); | ||
1313 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, | ||
1314 | bridge->hpp.t0->latency_timer); | ||
1315 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
1316 | if (bridge->hpp.t0->enable_serr) | ||
1317 | pci_cmd |= PCI_COMMAND_SERR; | ||
1318 | else | ||
1319 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
1320 | if (bridge->hpp.t0->enable_perr) | ||
1321 | pci_cmd |= PCI_COMMAND_PARITY; | ||
1322 | else | ||
1323 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
1324 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
1325 | |||
1326 | /* Program bridge control value and child devices */ | ||
1327 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
1328 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
1329 | bridge->hpp.t0->latency_timer); | ||
1330 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
1331 | if (bridge->hpp.t0->enable_serr) | ||
1332 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
1333 | else | ||
1334 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
1335 | if (bridge->hpp.t0->enable_perr) | ||
1336 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
1337 | else | ||
1338 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
1339 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
1340 | if (dev->subordinate) { | ||
1341 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
1342 | bus_list) | ||
1343 | program_hpp(cdev, bridge); | ||
1344 | } | ||
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus) | ||
1349 | { | ||
1350 | struct acpiphp_bridge bridge; | ||
1351 | struct pci_dev *dev; | 1229 | struct pci_dev *dev; |
1352 | 1230 | ||
1353 | memset(&bridge, 0, sizeof(bridge)); | ||
1354 | bridge.handle = handle; | ||
1355 | bridge.pci_bus = bus; | ||
1356 | bridge.pci_dev = bus->self; | ||
1357 | decode_hpp(&bridge); | ||
1358 | list_for_each_entry(dev, &bus->devices, bus_list) | 1231 | list_for_each_entry(dev, &bus->devices, bus_list) |
1359 | program_hpp(dev, &bridge); | 1232 | pci_configure_slot(dev); |
1360 | |||
1361 | } | 1233 | } |
1362 | 1234 | ||
1363 | /* | 1235 | /* |
@@ -1387,24 +1259,23 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) | |||
1387 | /* Program resources in newly inserted bridge */ | 1259 | /* Program resources in newly inserted bridge */ |
1388 | static int acpiphp_configure_bridge (acpi_handle handle) | 1260 | static int acpiphp_configure_bridge (acpi_handle handle) |
1389 | { | 1261 | { |
1390 | struct pci_dev *dev; | ||
1391 | struct pci_bus *bus; | 1262 | struct pci_bus *bus; |
1392 | 1263 | ||
1393 | dev = acpi_get_pci_dev(handle); | 1264 | if (acpi_is_root_bridge(handle)) { |
1394 | if (!dev) { | 1265 | struct acpi_pci_root *root = acpi_pci_find_root(handle); |
1395 | err("cannot get PCI domain and bus number for bridge\n"); | 1266 | bus = root->bus; |
1396 | return -EINVAL; | 1267 | } else { |
1268 | struct pci_dev *pdev = acpi_get_pci_dev(handle); | ||
1269 | bus = pdev->subordinate; | ||
1270 | pci_dev_put(pdev); | ||
1397 | } | 1271 | } |
1398 | 1272 | ||
1399 | bus = dev->bus; | ||
1400 | |||
1401 | pci_bus_size_bridges(bus); | 1273 | pci_bus_size_bridges(bus); |
1402 | pci_bus_assign_resources(bus); | 1274 | pci_bus_assign_resources(bus); |
1403 | acpiphp_sanitize_bus(bus); | 1275 | acpiphp_sanitize_bus(bus); |
1404 | acpiphp_set_hpp_values(handle, bus); | 1276 | acpiphp_set_hpp_values(bus); |
1405 | pci_enable_bridges(bus); | 1277 | pci_enable_bridges(bus); |
1406 | acpiphp_configure_ioapics(handle); | 1278 | acpiphp_configure_ioapics(handle); |
1407 | pci_dev_put(dev); | ||
1408 | return 0; | 1279 | return 0; |
1409 | } | 1280 | } |
1410 | 1281 | ||
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5c5043f239cf..0325d989bb46 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -86,7 +86,8 @@ static char *pci_bus_speed_strings[] = { | |||
86 | "66 MHz PCIX 533", /* 0x11 */ | 86 | "66 MHz PCIX 533", /* 0x11 */ |
87 | "100 MHz PCIX 533", /* 0x12 */ | 87 | "100 MHz PCIX 533", /* 0x12 */ |
88 | "133 MHz PCIX 533", /* 0x13 */ | 88 | "133 MHz PCIX 533", /* 0x13 */ |
89 | "25 GBps PCI-E", /* 0x14 */ | 89 | "2.5 GT/s PCI-E", /* 0x14 */ |
90 | "5.0 GT/s PCI-E", /* 0x15 */ | ||
90 | }; | 91 | }; |
91 | 92 | ||
92 | #ifdef CONFIG_HOTPLUG_PCI_CPCI | 93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e6cf096498be..36faa9a8e18f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -237,17 +237,8 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) | |||
237 | return retval; | 237 | return retval; |
238 | return pciehp_acpi_slot_detection_check(dev); | 238 | return pciehp_acpi_slot_detection_check(dev); |
239 | } | 239 | } |
240 | |||
241 | static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, | ||
242 | struct hotplug_params *hpp) | ||
243 | { | ||
244 | if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp))) | ||
245 | return -ENODEV; | ||
246 | return 0; | ||
247 | } | ||
248 | #else | 240 | #else |
249 | #define pciehp_firmware_init() do {} while (0) | 241 | #define pciehp_firmware_init() do {} while (0) |
250 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 | 242 | #define pciehp_get_hp_hw_control_from_firmware(dev) 0 |
251 | #define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV) | ||
252 | #endif /* CONFIG_ACPI */ | 243 | #endif /* CONFIG_ACPI */ |
253 | #endif /* _PCIEHP_H */ | 244 | #endif /* _PCIEHP_H */ |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 96048010e7d9..7163e6a6cfae 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -47,7 +47,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev) | |||
47 | { | 47 | { |
48 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) | 48 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) |
49 | return 0; | 49 | return 0; |
50 | if (acpi_pci_detect_ejectable(dev->subordinate)) | 50 | if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) |
51 | return 0; | 51 | return 0; |
52 | return -ENODEV; | 52 | return -ENODEV; |
53 | } | 53 | } |
@@ -76,9 +76,9 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
76 | { | 76 | { |
77 | int pos; | 77 | int pos; |
78 | u32 slot_cap; | 78 | u32 slot_cap; |
79 | acpi_handle handle; | ||
79 | struct slot *slot, *tmp; | 80 | struct slot *slot, *tmp; |
80 | struct pci_dev *pdev = dev->port; | 81 | struct pci_dev *pdev = dev->port; |
81 | struct pci_bus *pbus = pdev->subordinate; | ||
82 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 82 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ |
83 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | 83 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) |
84 | return -ENODEV; | 84 | return -ENODEV; |
@@ -94,7 +94,8 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
94 | dup_slot_id++; | 94 | dup_slot_id++; |
95 | } | 95 | } |
96 | list_add_tail(&slot->slot_list, &dummy_slots); | 96 | list_add_tail(&slot->slot_list, &dummy_slots); |
97 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus)) | 97 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); |
98 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) | ||
98 | acpi_slot_detected = 1; | 99 | acpi_slot_detected = 1; |
99 | return -ENODEV; /* dummy driver always returns error */ | 100 | return -ENODEV; /* dummy driver always returns error */ |
100 | } | 101 | } |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 8aab8edf123e..b97cb4c3e0fe 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -246,11 +246,6 @@ static int board_added(struct slot *p_slot) | |||
246 | goto err_exit; | 246 | goto err_exit; |
247 | } | 247 | } |
248 | 248 | ||
249 | /* | ||
250 | * Some PCI Express root ports require fixup after hot-plug operation. | ||
251 | */ | ||
252 | if (pcie_mch_quirk) | ||
253 | pci_fixup_device(pci_fixup_final, ctrl->pci_dev); | ||
254 | if (PWR_LED(ctrl)) | 249 | if (PWR_LED(ctrl)) |
255 | p_slot->hpc_ops->green_led_on(p_slot); | 250 | p_slot->hpc_ops->green_led_on(p_slot); |
256 | 251 | ||
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 52813257e5bf..271f917b6f2c 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -693,7 +693,10 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
693 | 693 | ||
694 | switch (lnk_cap & 0x000F) { | 694 | switch (lnk_cap & 0x000F) { |
695 | case 1: | 695 | case 1: |
696 | lnk_speed = PCIE_2PT5GB; | 696 | lnk_speed = PCIE_2_5GB; |
697 | break; | ||
698 | case 2: | ||
699 | lnk_speed = PCIE_5_0GB; | ||
697 | break; | 700 | break; |
698 | default: | 701 | default: |
699 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | 702 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; |
@@ -772,7 +775,10 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
772 | 775 | ||
773 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { | 776 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { |
774 | case 1: | 777 | case 1: |
775 | lnk_speed = PCIE_2PT5GB; | 778 | lnk_speed = PCIE_2_5GB; |
779 | break; | ||
780 | case 2: | ||
781 | lnk_speed = PCIE_5_0GB; | ||
776 | break; | 782 | break; |
777 | default: | 783 | default: |
778 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | 784 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; |
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 10f9566cceeb..02e24d63b3ee 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,136 +34,6 @@ | |||
34 | #include "../pci.h" | 34 | #include "../pci.h" |
35 | #include "pciehp.h" | 35 | #include "pciehp.h" |
36 | 36 | ||
37 | static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | ||
38 | { | ||
39 | u16 pci_cmd, pci_bctl; | ||
40 | |||
41 | if (hpp->revision > 1) { | ||
42 | warn("Rev.%d type0 record not supported\n", hpp->revision); | ||
43 | return; | ||
44 | } | ||
45 | |||
46 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); | ||
47 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); | ||
48 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
49 | if (hpp->enable_serr) | ||
50 | pci_cmd |= PCI_COMMAND_SERR; | ||
51 | else | ||
52 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
53 | if (hpp->enable_perr) | ||
54 | pci_cmd |= PCI_COMMAND_PARITY; | ||
55 | else | ||
56 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
57 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
58 | |||
59 | /* Program bridge control value */ | ||
60 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
61 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
62 | hpp->latency_timer); | ||
63 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
64 | if (hpp->enable_serr) | ||
65 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
66 | else | ||
67 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
68 | if (hpp->enable_perr) | ||
69 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
70 | else | ||
71 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
72 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | ||
77 | { | ||
78 | int pos; | ||
79 | u16 reg16; | ||
80 | u32 reg32; | ||
81 | |||
82 | if (hpp->revision > 1) { | ||
83 | warn("Rev.%d type2 record not supported\n", hpp->revision); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | /* Find PCI Express capability */ | ||
88 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
89 | if (!pos) | ||
90 | return; | ||
91 | |||
92 | /* Initialize Device Control Register */ | ||
93 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); | ||
94 | reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; | ||
95 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); | ||
96 | |||
97 | /* Initialize Link Control Register */ | ||
98 | if (dev->subordinate) { | ||
99 | pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, ®16); | ||
100 | reg16 = (reg16 & hpp->pci_exp_lnkctl_and) | ||
101 | | hpp->pci_exp_lnkctl_or; | ||
102 | pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16); | ||
103 | } | ||
104 | |||
105 | /* Find Advanced Error Reporting Enhanced Capability */ | ||
106 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
107 | if (!pos) | ||
108 | return; | ||
109 | |||
110 | /* Initialize Uncorrectable Error Mask Register */ | ||
111 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); | ||
112 | reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; | ||
113 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); | ||
114 | |||
115 | /* Initialize Uncorrectable Error Severity Register */ | ||
116 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); | ||
117 | reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; | ||
118 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); | ||
119 | |||
120 | /* Initialize Correctable Error Mask Register */ | ||
121 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); | ||
122 | reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; | ||
123 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); | ||
124 | |||
125 | /* Initialize Advanced Error Capabilities and Control Register */ | ||
126 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | ||
127 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; | ||
128 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | ||
129 | |||
130 | /* | ||
131 | * FIXME: The following two registers are not supported yet. | ||
132 | * | ||
133 | * o Secondary Uncorrectable Error Severity Register | ||
134 | * o Secondary Uncorrectable Error Mask Register | ||
135 | */ | ||
136 | } | ||
137 | |||
138 | static void program_fw_provided_values(struct pci_dev *dev) | ||
139 | { | ||
140 | struct pci_dev *cdev; | ||
141 | struct hotplug_params hpp; | ||
142 | |||
143 | /* Program hpp values for this device */ | ||
144 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
145 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
146 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
147 | return; | ||
148 | |||
149 | if (pciehp_get_hp_params_from_firmware(dev, &hpp)) { | ||
150 | warn("Could not get hotplug parameters\n"); | ||
151 | return; | ||
152 | } | ||
153 | |||
154 | if (hpp.t2) | ||
155 | program_hpp_type2(dev, hpp.t2); | ||
156 | if (hpp.t0) | ||
157 | program_hpp_type0(dev, hpp.t0); | ||
158 | |||
159 | /* Program child devices */ | ||
160 | if (dev->subordinate) { | ||
161 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
162 | bus_list) | ||
163 | program_fw_provided_values(cdev); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static int __ref pciehp_add_bridge(struct pci_dev *dev) | 37 | static int __ref pciehp_add_bridge(struct pci_dev *dev) |
168 | { | 38 | { |
169 | struct pci_bus *parent = dev->bus; | 39 | struct pci_bus *parent = dev->bus; |
@@ -226,7 +96,7 @@ int pciehp_configure_device(struct slot *p_slot) | |||
226 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { | 96 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { |
227 | pciehp_add_bridge(dev); | 97 | pciehp_add_bridge(dev); |
228 | } | 98 | } |
229 | program_fw_provided_values(dev); | 99 | pci_configure_slot(dev); |
230 | pci_dev_put(dev); | 100 | pci_dev_put(dev); |
231 | } | 101 | } |
232 | 102 | ||
@@ -285,11 +155,6 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
285 | } | 155 | } |
286 | pci_dev_put(temp); | 156 | pci_dev_put(temp); |
287 | } | 157 | } |
288 | /* | ||
289 | * Some PCI Express root ports require fixup after hot-plug operation. | ||
290 | */ | ||
291 | if (pcie_mch_quirk) | ||
292 | pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev); | ||
293 | 158 | ||
294 | return rc; | 159 | return rc; |
295 | } | 160 | } |
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
new file mode 100644
index 000000000000..cc8ec3aa41a7
--- /dev/null
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1995,2001 Compaq Computer Corporation | ||
3 | * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) | ||
4 | * Copyright (C) 2001 IBM Corp. | ||
5 | * Copyright (C) 2003-2004 Intel Corporation | ||
6 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or (at | ||
13 | * your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/pci.h> | ||
27 | #include <linux/pci_hotplug.h> | ||
28 | |||
29 | static struct hpp_type0 pci_default_type0 = { | ||
30 | .revision = 1, | ||
31 | .cache_line_size = 8, | ||
32 | .latency_timer = 0x40, | ||
33 | .enable_serr = 0, | ||
34 | .enable_perr = 0, | ||
35 | }; | ||
36 | |||
37 | static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | ||
38 | { | ||
39 | u16 pci_cmd, pci_bctl; | ||
40 | |||
41 | if (!hpp) { | ||
42 | /* | ||
43 | * Perhaps we *should* use default settings for PCIe, but | ||
44 | * pciehp didn't, so we won't either. | ||
45 | */ | ||
46 | if (dev->is_pcie) | ||
47 | return; | ||
48 | dev_info(&dev->dev, "using default PCI settings\n"); | ||
49 | hpp = &pci_default_type0; | ||
50 | } | ||
51 | |||
52 | if (hpp->revision > 1) { | ||
53 | dev_warn(&dev->dev, | ||
54 | "PCI settings rev %d not supported; using defaults\n", | ||
55 | hpp->revision); | ||
56 | hpp = &pci_default_type0; | ||
57 | } | ||
58 | |||
59 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); | ||
60 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); | ||
61 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
62 | if (hpp->enable_serr) | ||
63 | pci_cmd |= PCI_COMMAND_SERR; | ||
64 | else | ||
65 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
66 | if (hpp->enable_perr) | ||
67 | pci_cmd |= PCI_COMMAND_PARITY; | ||
68 | else | ||
69 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
70 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
71 | |||
72 | /* Program bridge control value */ | ||
73 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
74 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
75 | hpp->latency_timer); | ||
76 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
77 | if (hpp->enable_serr) | ||
78 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
79 | else | ||
80 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
81 | if (hpp->enable_perr) | ||
82 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
83 | else | ||
84 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
85 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) | ||
90 | { | ||
91 | if (hpp) | ||
92 | dev_warn(&dev->dev, "PCI-X settings not supported\n"); | ||
93 | } | ||
94 | |||
95 | static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | ||
96 | { | ||
97 | int pos; | ||
98 | u16 reg16; | ||
99 | u32 reg32; | ||
100 | |||
101 | if (!hpp) | ||
102 | return; | ||
103 | |||
104 | /* Find PCI Express capability */ | ||
105 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
106 | if (!pos) | ||
107 | return; | ||
108 | |||
109 | if (hpp->revision > 1) { | ||
110 | dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", | ||
111 | hpp->revision); | ||
112 | return; | ||
113 | } | ||
114 | |||
115 | /* Initialize Device Control Register */ | ||
116 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); | ||
117 | reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; | ||
118 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); | ||
119 | |||
120 | /* Initialize Link Control Register */ | ||
121 | if (dev->subordinate) { | ||
122 | pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, ®16); | ||
123 | reg16 = (reg16 & hpp->pci_exp_lnkctl_and) | ||
124 | | hpp->pci_exp_lnkctl_or; | ||
125 | pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16); | ||
126 | } | ||
127 | |||
128 | /* Find Advanced Error Reporting Enhanced Capability */ | ||
129 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
130 | if (!pos) | ||
131 | return; | ||
132 | |||
133 | /* Initialize Uncorrectable Error Mask Register */ | ||
134 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); | ||
135 | reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; | ||
136 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); | ||
137 | |||
138 | /* Initialize Uncorrectable Error Severity Register */ | ||
139 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); | ||
140 | reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; | ||
141 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); | ||
142 | |||
143 | /* Initialize Correctable Error Mask Register */ | ||
144 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); | ||
145 | reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; | ||
146 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); | ||
147 | |||
148 | /* Initialize Advanced Error Capabilities and Control Register */ | ||
149 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | ||
150 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; | ||
151 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | ||
152 | |||
153 | /* | ||
154 | * FIXME: The following two registers are not supported yet. | ||
155 | * | ||
156 | * o Secondary Uncorrectable Error Severity Register | ||
157 | * o Secondary Uncorrectable Error Mask Register | ||
158 | */ | ||
159 | } | ||
160 | |||
161 | void pci_configure_slot(struct pci_dev *dev) | ||
162 | { | ||
163 | struct pci_dev *cdev; | ||
164 | struct hotplug_params hpp; | ||
165 | int ret; | ||
166 | |||
167 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
168 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
169 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
170 | return; | ||
171 | |||
172 | memset(&hpp, 0, sizeof(hpp)); | ||
173 | ret = pci_get_hp_params(dev, &hpp); | ||
174 | if (ret) | ||
175 | dev_warn(&dev->dev, "no hotplug settings from platform\n"); | ||
176 | |||
177 | program_hpp_type2(dev, hpp.t2); | ||
178 | program_hpp_type1(dev, hpp.t1); | ||
179 | program_hpp_type0(dev, hpp.t0); | ||
180 | |||
181 | if (dev->subordinate) { | ||
182 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
183 | bus_list) | ||
184 | pci_configure_slot(cdev); | ||
185 | } | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(pci_configure_slot); | ||
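The file above is the new shared home for the _HPP/_HPX programming that acpiphp, pciehp and shpchp previously duplicated. A minimal sketch of the usage pattern, assuming a hypothetical configure_new_devices() wrapper (the real callers appear in the acpiphp, pciehp and shpchp hunks of this diff):

```c
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/*
 * Hypothetical wrapper: after a hot-added bus has been scanned and its
 * resources assigned, program each device with firmware-provided (or
 * default) settings via the new common helper.
 */
static void configure_new_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list)
		pci_configure_slot(dev);	/* walks subordinate buses itself */
}
```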
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 974e924ca96d..bd588eb8e922 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -188,21 +188,12 @@ static inline const char *slot_name(struct slot *slot) | |||
188 | 188 | ||
189 | #ifdef CONFIG_ACPI | 189 | #ifdef CONFIG_ACPI |
190 | #include <linux/pci-acpi.h> | 190 | #include <linux/pci-acpi.h> |
191 | static inline int get_hp_params_from_firmware(struct pci_dev *dev, | ||
192 | struct hotplug_params *hpp) | ||
193 | { | ||
194 | if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp))) | ||
195 | return -ENODEV; | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) | 191 | static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) |
200 | { | 192 | { |
201 | u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; | 193 | u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; |
202 | return acpi_get_hp_hw_control_from_firmware(dev, flags); | 194 | return acpi_get_hp_hw_control_from_firmware(dev, flags); |
203 | } | 195 | } |
204 | #else | 196 | #else |
205 | #define get_hp_params_from_firmware(dev, hpp) (-ENODEV) | ||
206 | #define get_hp_hw_control_from_firmware(dev) (0) | 197 | #define get_hp_hw_control_from_firmware(dev) (0) |
207 | #endif | 198 | #endif |
208 | 199 | ||
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index aa315e52529b..8c3d3219f227 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,66 +34,6 @@ | |||
34 | #include "../pci.h" | 34 | #include "../pci.h" |
35 | #include "shpchp.h" | 35 | #include "shpchp.h" |
36 | 36 | ||
37 | static void program_fw_provided_values(struct pci_dev *dev) | ||
38 | { | ||
39 | u16 pci_cmd, pci_bctl; | ||
40 | struct pci_dev *cdev; | ||
41 | struct hotplug_params hpp; | ||
42 | |||
43 | /* Program hpp values for this device */ | ||
44 | if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || | ||
45 | (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | ||
46 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | ||
47 | return; | ||
48 | |||
49 | /* use default values if we can't get them from firmware */ | ||
50 | if (get_hp_params_from_firmware(dev, &hpp) || | ||
51 | !hpp.t0 || (hpp.t0->revision > 1)) { | ||
52 | warn("Could not get hotplug parameters. Use defaults\n"); | ||
53 | hpp.t0 = &hpp.type0_data; | ||
54 | hpp.t0->revision = 0; | ||
55 | hpp.t0->cache_line_size = 8; | ||
56 | hpp.t0->latency_timer = 0x40; | ||
57 | hpp.t0->enable_serr = 0; | ||
58 | hpp.t0->enable_perr = 0; | ||
59 | } | ||
60 | |||
61 | pci_write_config_byte(dev, | ||
62 | PCI_CACHE_LINE_SIZE, hpp.t0->cache_line_size); | ||
63 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp.t0->latency_timer); | ||
64 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
65 | if (hpp.t0->enable_serr) | ||
66 | pci_cmd |= PCI_COMMAND_SERR; | ||
67 | else | ||
68 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
69 | if (hpp.t0->enable_perr) | ||
70 | pci_cmd |= PCI_COMMAND_PARITY; | ||
71 | else | ||
72 | pci_cmd &= ~PCI_COMMAND_PARITY; | ||
73 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
74 | |||
75 | /* Program bridge control value and child devices */ | ||
76 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
77 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | ||
78 | hpp.t0->latency_timer); | ||
79 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
80 | if (hpp.t0->enable_serr) | ||
81 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
82 | else | ||
83 | pci_bctl &= ~PCI_BRIDGE_CTL_SERR; | ||
84 | if (hpp.t0->enable_perr) | ||
85 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | ||
86 | else | ||
87 | pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; | ||
88 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | ||
89 | if (dev->subordinate) { | ||
90 | list_for_each_entry(cdev, &dev->subordinate->devices, | ||
91 | bus_list) | ||
92 | program_fw_provided_values(cdev); | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | |||
97 | int __ref shpchp_configure_device(struct slot *p_slot) | 37 | int __ref shpchp_configure_device(struct slot *p_slot) |
98 | { | 38 | { |
99 | struct pci_dev *dev; | 39 | struct pci_dev *dev; |
@@ -153,7 +93,7 @@ int __ref shpchp_configure_device(struct slot *p_slot) | |||
153 | child->subordinate = pci_do_scan_bus(child); | 93 | child->subordinate = pci_do_scan_bus(child); |
154 | pci_bus_size_bridges(child); | 94 | pci_bus_size_bridges(child); |
155 | } | 95 | } |
156 | program_fw_provided_values(dev); | 96 | pci_configure_slot(dev); |
157 | pci_dev_put(dev); | 97 | pci_dev_put(dev); |
158 | } | 98 | } |
159 | 99 | ||
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c
new file mode 100644
index 000000000000..871f65c15936
--- /dev/null
+++ b/drivers/pci/legacy.c
@@ -0,0 +1,34 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/pci.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/interrupt.h> | ||
5 | #include "pci.h" | ||
6 | |||
7 | /** | ||
8 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
9 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
10 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
11 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
12 | * | ||
13 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
14 | * with a matching @vendor and @device, a pointer to its device structure is | ||
15 | * returned. Otherwise, %NULL is returned. | ||
16 | * A new search is initiated by passing %NULL as the @from argument. | ||
17 | * Otherwise if @from is not %NULL, searches continue from next device | ||
18 | * on the global list. | ||
19 | * | ||
20 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
21 | * the PCI device returned by this function can disappear at any moment in | ||
22 | * time. | ||
23 | */ | ||
24 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
25 | struct pci_dev *from) | ||
26 | { | ||
27 | struct pci_dev *pdev; | ||
28 | |||
29 | pci_dev_get(from); | ||
30 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
31 | pci_dev_put(pdev); | ||
32 | return pdev; | ||
33 | } | ||
34 | EXPORT_SYMBOL(pci_find_device); | ||
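legacy.c keeps the deprecated pci_find_device() alive for the remaining CONFIG_PCI_LEGACY users; new code should use the reference-counted search instead. A sketch of the preferred pattern (the vendor/device IDs are placeholders):

```c
#include <linux/pci.h>

#define MY_VENDOR_ID 0x8086	/* placeholder IDs for illustration */
#define MY_DEVICE_ID 0x1234

static void scan_for_my_devices(void)
{
	struct pci_dev *pdev = NULL;

	/*
	 * pci_get_device() holds a reference on the device it returns and
	 * drops the reference on 'from', so the device cannot disappear
	 * while the loop body uses it, unlike with pci_find_device().
	 */
	while ((pdev = pci_get_device(MY_VENDOR_ID, MY_DEVICE_ID, pdev)) != NULL)
		dev_info(&pdev->dev, "found matching device\n");
}
```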
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d986afb7032b..f9cf3173b23d 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -16,9 +16,8 @@ | |||
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/msi.h> | 17 | #include <linux/msi.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | 19 | #include <linux/errno.h> | |
20 | #include <asm/errno.h> | 20 | #include <linux/io.h> |
21 | #include <asm/io.h> | ||
22 | 21 | ||
23 | #include "pci.h" | 22 | #include "pci.h" |
24 | #include "msi.h" | 23 | #include "msi.h" |
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg) | |||
272 | write_msi_msg_desc(desc, msg); | 271 | write_msi_msg_desc(desc, msg); |
273 | } | 272 | } |
274 | 273 | ||
275 | static int msi_free_irqs(struct pci_dev* dev); | 274 | static void free_msi_irqs(struct pci_dev *dev) |
275 | { | ||
276 | struct msi_desc *entry, *tmp; | ||
277 | |||
278 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
279 | int i, nvec; | ||
280 | if (!entry->irq) | ||
281 | continue; | ||
282 | nvec = 1 << entry->msi_attrib.multiple; | ||
283 | for (i = 0; i < nvec; i++) | ||
284 | BUG_ON(irq_has_action(entry->irq + i)); | ||
285 | } | ||
286 | |||
287 | arch_teardown_msi_irqs(dev); | ||
288 | |||
289 | list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { | ||
290 | if (entry->msi_attrib.is_msix) { | ||
291 | if (list_is_last(&entry->list, &dev->msi_list)) | ||
292 | iounmap(entry->mask_base); | ||
293 | } | ||
294 | list_del(&entry->list); | ||
295 | kfree(entry); | ||
296 | } | ||
297 | } | ||
276 | 298 | ||
277 | static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) | 299 | static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) |
278 | { | 300 | { |
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
324 | if (!dev->msix_enabled) | 346 | if (!dev->msix_enabled) |
325 | return; | 347 | return; |
326 | BUG_ON(list_empty(&dev->msi_list)); | 348 | BUG_ON(list_empty(&dev->msi_list)); |
327 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | 349 | entry = list_first_entry(&dev->msi_list, struct msi_desc, list); |
328 | pos = entry->msi_attrib.pos; | 350 | pos = entry->msi_attrib.pos; |
329 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | 351 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); |
330 | 352 | ||
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
367 | u16 control; | 389 | u16 control; |
368 | unsigned mask; | 390 | unsigned mask; |
369 | 391 | ||
370 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 392 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
371 | msi_set_enable(dev, pos, 0); /* Disable MSI during set up */ | 393 | msi_set_enable(dev, pos, 0); /* Disable MSI during set up */ |
372 | 394 | ||
373 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 395 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
376 | if (!entry) | 398 | if (!entry) |
377 | return -ENOMEM; | 399 | return -ENOMEM; |
378 | 400 | ||
379 | entry->msi_attrib.is_msix = 0; | 401 | entry->msi_attrib.is_msix = 0; |
380 | entry->msi_attrib.is_64 = is_64bit_address(control); | 402 | entry->msi_attrib.is_64 = is_64bit_address(control); |
381 | entry->msi_attrib.entry_nr = 0; | 403 | entry->msi_attrib.entry_nr = 0; |
382 | entry->msi_attrib.maskbit = is_mask_bit_support(control); | 404 | entry->msi_attrib.maskbit = is_mask_bit_support(control); |
383 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ | 405 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ |
384 | entry->msi_attrib.pos = pos; | 406 | entry->msi_attrib.pos = pos; |
385 | 407 | ||
386 | entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64); | 408 | entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64); |
387 | /* All MSIs are unmasked by default, Mask them all */ | 409 | /* All MSIs are unmasked by default, Mask them all */ |
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
396 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); | 418 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); |
397 | if (ret) { | 419 | if (ret) { |
398 | msi_mask_irq(entry, mask, ~mask); | 420 | msi_mask_irq(entry, mask, ~mask); |
399 | msi_free_irqs(dev); | 421 | free_msi_irqs(dev); |
400 | return ret; | 422 | return ret; |
401 | } | 423 | } |
402 | 424 | ||
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
409 | return 0; | 431 | return 0; |
410 | } | 432 | } |
411 | 433 | ||
412 | /** | 434 | static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos, |
413 | * msix_capability_init - configure device's MSI-X capability | 435 | unsigned nr_entries) |
414 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
415 | * @entries: pointer to an array of struct msix_entry entries | ||
416 | * @nvec: number of @entries | ||
417 | * | ||
418 | * Setup the MSI-X capability structure of device function with a | ||
419 | * single MSI-X irq. A return of zero indicates the successful setup of | ||
420 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. | ||
421 | **/ | ||
422 | static int msix_capability_init(struct pci_dev *dev, | ||
423 | struct msix_entry *entries, int nvec) | ||
424 | { | 436 | { |
425 | struct msi_desc *entry; | ||
426 | int pos, i, j, nr_entries, ret; | ||
427 | unsigned long phys_addr; | 437 | unsigned long phys_addr; |
428 | u32 table_offset; | 438 | u32 table_offset; |
429 | u16 control; | ||
430 | u8 bir; | 439 | u8 bir; |
431 | void __iomem *base; | ||
432 | 440 | ||
433 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 441 | pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); |
434 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
435 | |||
436 | /* Ensure MSI-X is disabled while it is set up */ | ||
437 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
438 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
439 | |||
440 | /* Request & Map MSI-X table region */ | ||
441 | nr_entries = multi_msix_capable(control); | ||
442 | |||
443 | pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); | ||
444 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | 442 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); |
445 | table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; | 443 | table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; |
446 | phys_addr = pci_resource_start (dev, bir) + table_offset; | 444 | phys_addr = pci_resource_start(dev, bir) + table_offset; |
447 | base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); | 445 | |
448 | if (base == NULL) | 446 | return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); |
449 | return -ENOMEM; | 447 | } |
448 | |||
449 | static int msix_setup_entries(struct pci_dev *dev, unsigned pos, | ||
450 | void __iomem *base, struct msix_entry *entries, | ||
451 | int nvec) | ||
452 | { | ||
453 | struct msi_desc *entry; | ||
454 | int i; | ||
450 | 455 | ||
451 | for (i = 0; i < nvec; i++) { | 456 | for (i = 0; i < nvec; i++) { |
452 | entry = alloc_msi_entry(dev); | 457 | entry = alloc_msi_entry(dev); |
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev, | |||
454 | if (!i) | 459 | if (!i) |
455 | iounmap(base); | 460 | iounmap(base); |
456 | else | 461 | else |
457 | msi_free_irqs(dev); | 462 | free_msi_irqs(dev); |
458 | /* Not enough memory. Don't try again */ | 463 | /* Not enough memory. Don't try again */
459 | return -ENOMEM; | 464 | return -ENOMEM; |
460 | } | 465 | } |
461 | 466 | ||
462 | j = entries[i].entry; | 467 | entry->msi_attrib.is_msix = 1; |
463 | entry->msi_attrib.is_msix = 1; | 468 | entry->msi_attrib.is_64 = 1; |
464 | entry->msi_attrib.is_64 = 1; | 469 | entry->msi_attrib.entry_nr = entries[i].entry; |
465 | entry->msi_attrib.entry_nr = j; | 470 | entry->msi_attrib.default_irq = dev->irq; |
466 | entry->msi_attrib.default_irq = dev->irq; | 471 | entry->msi_attrib.pos = pos; |
467 | entry->msi_attrib.pos = pos; | 472 | entry->mask_base = base; |
468 | entry->mask_base = base; | ||
469 | 473 | ||
470 | list_add_tail(&entry->list, &dev->msi_list); | 474 | list_add_tail(&entry->list, &dev->msi_list); |
471 | } | 475 | } |
472 | 476 | ||
473 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); | 477 | return 0; |
474 | if (ret < 0) { | 478 | } |
475 | /* If we had some success report the number of irqs | ||
476 | * we succeeded in setting up. */ | ||
477 | int avail = 0; | ||
478 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
479 | if (entry->irq != 0) { | ||
480 | avail++; | ||
481 | } | ||
482 | } | ||
483 | 479 | ||
484 | if (avail != 0) | 480 | static void msix_program_entries(struct pci_dev *dev, |
485 | ret = avail; | 481 | struct msix_entry *entries) |
482 | { | ||
483 | struct msi_desc *entry; | ||
484 | int i = 0; | ||
485 | |||
486 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
487 | int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + | ||
488 | PCI_MSIX_ENTRY_VECTOR_CTRL; | ||
489 | |||
490 | entries[i].vector = entry->irq; | ||
491 | set_irq_msi(entry->irq, entry); | ||
492 | entry->masked = readl(entry->mask_base + offset); | ||
493 | msix_mask_irq(entry, 1); | ||
494 | i++; | ||
486 | } | 495 | } |
496 | } | ||
487 | 497 | ||
488 | if (ret) { | 498 | /** |
489 | msi_free_irqs(dev); | 499 | * msix_capability_init - configure device's MSI-X capability |
500 | * @dev: pointer to the pci_dev data structure of MSI-X device function | ||
501 | * @entries: pointer to an array of struct msix_entry entries | ||
502 | * @nvec: number of @entries | ||
503 | * | ||
504 | * Set up the MSI-X capability structure of the device function with the | ||
505 | * requested number of MSI-X irqs. A return of zero indicates successful | ||
506 | * setup of the requested MSI-X entries with allocated irqs; non-zero otherwise. | ||
507 | **/ | ||
508 | static int msix_capability_init(struct pci_dev *dev, | ||
509 | struct msix_entry *entries, int nvec) | ||
510 | { | ||
511 | int pos, ret; | ||
512 | u16 control; | ||
513 | void __iomem *base; | ||
514 | |||
515 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
516 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
517 | |||
518 | /* Ensure MSI-X is disabled while it is set up */ | ||
519 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
520 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
521 | |||
522 | /* Request & Map MSI-X table region */ | ||
523 | base = msix_map_region(dev, pos, multi_msix_capable(control)); | ||
524 | if (!base) | ||
525 | return -ENOMEM; | ||
526 | |||
527 | ret = msix_setup_entries(dev, pos, base, entries, nvec); | ||
528 | if (ret) | ||
490 | return ret; | 529 | return ret; |
491 | } | 530 | |
531 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); | ||
532 | if (ret) | ||
533 | goto error; | ||
492 | 534 | ||
493 | /* | 535 | /* |
494 | * Some devices require MSI-X to be enabled before we can touch the | 536 | * Some devices require MSI-X to be enabled before we can touch the |
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
498 | control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; | 540 | control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; |
499 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 541 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
500 | 542 | ||
501 | i = 0; | 543 | msix_program_entries(dev, entries); |
502 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
503 | entries[i].vector = entry->irq; | ||
504 | set_irq_msi(entry->irq, entry); | ||
505 | j = entries[i].entry; | ||
506 | entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE + | ||
507 | PCI_MSIX_ENTRY_VECTOR_CTRL); | ||
508 | msix_mask_irq(entry, 1); | ||
509 | i++; | ||
510 | } | ||
511 | 544 | ||
512 | /* Set MSI-X enabled bits and unmask the function */ | 545 | /* Set MSI-X enabled bits and unmask the function */ |
513 | pci_intx_for_msi(dev, 0); | 546 | pci_intx_for_msi(dev, 0); |
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev, | |||
517 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 550 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
518 | 551 | ||
519 | return 0; | 552 | return 0; |
553 | |||
554 | error: | ||
555 | if (ret < 0) { | ||
556 | /* | ||
557 | * If we had some success, report the number of irqs | ||
558 | * we succeeded in setting up. | ||
559 | */ | ||
560 | struct msi_desc *entry; | ||
561 | int avail = 0; | ||
562 | |||
563 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
564 | if (entry->irq != 0) | ||
565 | avail++; | ||
566 | } | ||
567 | if (avail != 0) | ||
568 | ret = avail; | ||
569 | } | ||
570 | |||
571 | free_msi_irqs(dev); | ||
572 | |||
573 | return ret; | ||
520 | } | 574 | } |
521 | 575 | ||
522 | /** | 576 | /** |
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
529 | * to determine if MSI/-X are supported for the device. If MSI/-X is | 583 | * to determine if MSI/-X are supported for the device. If MSI/-X is |
530 | * supported return 0, else return an error code. | 584 | * supported return 0, else return an error code. |
531 | **/ | 585 | **/ |
532 | static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) | 586 | static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type) |
533 | { | 587 | { |
534 | struct pci_bus *bus; | 588 | struct pci_bus *bus; |
535 | int ret; | 589 | int ret; |
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) | |||
546 | if (nvec < 1) | 600 | if (nvec < 1) |
547 | return -ERANGE; | 601 | return -ERANGE; |
548 | 602 | ||
549 | /* Any bridge which does NOT route MSI transactions from it's | 603 | /* |
550 | * secondary bus to it's primary bus must set NO_MSI flag on | 604 | * Any bridge which does NOT route MSI transactions from its |
605 | * secondary bus to its primary bus must set NO_MSI flag on | ||
551 | * the secondary pci_bus. | 606 | * the secondary pci_bus. |
552 | * We expect only arch-specific PCI host bus controller driver | 607 | * We expect only arch-specific PCI host bus controller driver |
553 | * or quirks for specific PCI bridges to be setting NO_MSI. | 608 | * or quirks for specific PCI bridges to be setting NO_MSI. |
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev) | |||
638 | dev->irq = desc->msi_attrib.default_irq; | 693 | dev->irq = desc->msi_attrib.default_irq; |
639 | } | 694 | } |
640 | 695 | ||
641 | void pci_disable_msi(struct pci_dev* dev) | 696 | void pci_disable_msi(struct pci_dev *dev) |
642 | { | 697 | { |
643 | struct msi_desc *entry; | ||
644 | |||
645 | if (!pci_msi_enable || !dev || !dev->msi_enabled) | 698 | if (!pci_msi_enable || !dev || !dev->msi_enabled) |
646 | return; | 699 | return; |
647 | 700 | ||
648 | pci_msi_shutdown(dev); | 701 | pci_msi_shutdown(dev); |
649 | 702 | free_msi_irqs(dev); | |
650 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | ||
651 | if (entry->msi_attrib.is_msix) | ||
652 | return; | ||
653 | |||
654 | msi_free_irqs(dev); | ||
655 | } | 703 | } |
656 | EXPORT_SYMBOL(pci_disable_msi); | 704 | EXPORT_SYMBOL(pci_disable_msi); |
657 | 705 | ||
658 | static int msi_free_irqs(struct pci_dev* dev) | ||
659 | { | ||
660 | struct msi_desc *entry, *tmp; | ||
661 | |||
662 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
663 | int i, nvec; | ||
664 | if (!entry->irq) | ||
665 | continue; | ||
666 | nvec = 1 << entry->msi_attrib.multiple; | ||
667 | for (i = 0; i < nvec; i++) | ||
668 | BUG_ON(irq_has_action(entry->irq + i)); | ||
669 | } | ||
670 | |||
671 | arch_teardown_msi_irqs(dev); | ||
672 | |||
673 | list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { | ||
674 | if (entry->msi_attrib.is_msix) { | ||
675 | if (list_is_last(&entry->list, &dev->msi_list)) | ||
676 | iounmap(entry->mask_base); | ||
677 | } | ||
678 | list_del(&entry->list); | ||
679 | kfree(entry); | ||
680 | } | ||
681 | |||
682 | return 0; | ||
683 | } | ||
684 | |||
685 | /** | 706 | /** |
686 | * pci_msix_table_size - return the number of device's MSI-X table entries | 707 | * pci_msix_table_size - return the number of device's MSI-X table entries |
687 | * @dev: pointer to the pci_dev data structure of MSI-X device function | 708 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev) | |||
714 | * of irqs or MSI-X vectors available. Driver should use the returned value to | 735 | * of irqs or MSI-X vectors available. Driver should use the returned value to |
715 | * re-send its request. | 736 | * re-send its request. |
716 | **/ | 737 | **/ |
717 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | 738 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) |
718 | { | 739 | { |
719 | int status, nr_entries; | 740 | int status, nr_entries; |
720 | int i, j; | 741 | int i, j; |
721 | 742 | ||
722 | if (!entries) | 743 | if (!entries) |
723 | return -EINVAL; | 744 | return -EINVAL; |
724 | 745 | ||
725 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); | 746 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); |
726 | if (status) | 747 | if (status) |
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
742 | WARN_ON(!!dev->msix_enabled); | 763 | WARN_ON(!!dev->msix_enabled); |
743 | 764 | ||
744 | /* Check whether driver already requested for MSI irq */ | 765 | /* Check whether driver already requested for MSI irq */ |
745 | if (dev->msi_enabled) { | 766 | if (dev->msi_enabled) { |
746 | dev_info(&dev->dev, "can't enable MSI-X " | 767 | dev_info(&dev->dev, "can't enable MSI-X " |
747 | "(MSI IRQ already assigned)\n"); | 768 | "(MSI IRQ already assigned)\n"); |
748 | return -EINVAL; | 769 | return -EINVAL; |
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
752 | } | 773 | } |
753 | EXPORT_SYMBOL(pci_enable_msix); | 774 | EXPORT_SYMBOL(pci_enable_msix); |
754 | 775 | ||
755 | static void msix_free_all_irqs(struct pci_dev *dev) | 776 | void pci_msix_shutdown(struct pci_dev *dev) |
756 | { | ||
757 | msi_free_irqs(dev); | ||
758 | } | ||
759 | |||
760 | void pci_msix_shutdown(struct pci_dev* dev) | ||
761 | { | 777 | { |
762 | struct msi_desc *entry; | 778 | struct msi_desc *entry; |
763 | 779 | ||
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev) | |||
774 | pci_intx_for_msi(dev, 1); | 790 | pci_intx_for_msi(dev, 1); |
775 | dev->msix_enabled = 0; | 791 | dev->msix_enabled = 0; |
776 | } | 792 | } |
777 | void pci_disable_msix(struct pci_dev* dev) | 793 | |
794 | void pci_disable_msix(struct pci_dev *dev) | ||
778 | { | 795 | { |
779 | if (!pci_msi_enable || !dev || !dev->msix_enabled) | 796 | if (!pci_msi_enable || !dev || !dev->msix_enabled) |
780 | return; | 797 | return; |
781 | 798 | ||
782 | pci_msix_shutdown(dev); | 799 | pci_msix_shutdown(dev); |
783 | 800 | free_msi_irqs(dev); | |
784 | msix_free_all_irqs(dev); | ||
785 | } | 801 | } |
786 | EXPORT_SYMBOL(pci_disable_msix); | 802 | EXPORT_SYMBOL(pci_disable_msix); |
787 | 803 | ||
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix); | |||
794 | * allocated for this device function, are reclaimed to unused state, | 810 | * allocated for this device function, are reclaimed to unused state, |
795 | * which may be used later on. | 811 | * which may be used later on. |
796 | **/ | 812 | **/ |
797 | void msi_remove_pci_irq_vectors(struct pci_dev* dev) | 813 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) |
798 | { | 814 | { |
799 | if (!pci_msi_enable || !dev) | 815 | if (!pci_msi_enable || !dev) |
800 | return; | 816 | return; |
801 | |||
802 | if (dev->msi_enabled) | ||
803 | msi_free_irqs(dev); | ||
804 | 817 | ||
805 | if (dev->msix_enabled) | 818 | if (dev->msi_enabled || dev->msix_enabled) |
806 | msix_free_all_irqs(dev); | 819 | free_msi_irqs(dev); |
807 | } | 820 | } |
808 | 821 | ||
809 | void pci_no_msi(void) | 822 | void pci_no_msi(void) |
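The msi.c rework above is largely internal: msix_capability_init() is split into msix_map_region(), msix_setup_entries() and msix_program_entries(), and every teardown path now funnels through the single free_msi_irqs() helper. The driver-visible contract of pci_enable_msix() documented above is unchanged: 0 on success, a negative errno on hard failure, or a positive count of vectors that could still be allocated. A hedged sketch of the usual retry loop in a driver, with pdev and the vector count purely illustrative:

        struct msix_entry entries[4];
        int i, rc, nvec = 4;

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;   /* table slots we want */

        for (;;) {
                rc = pci_enable_msix(pdev, entries, nvec);
                if (rc == 0)
                        break;          /* all nvec vectors granted */
                if (rc < 0)
                        return rc;      /* MSI-X unusable, fall back to INTx/MSI */
                nvec = rc;              /* retry with what the core says is available */
        }
        /* entries[i].vector now holds the irq numbers to hand to request_irq(). */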
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index ea15b0537457..33317df47699 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -109,15 +109,32 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev) | |||
109 | return handle ? acpi_bus_can_wakeup(handle) : false; | 109 | return handle ? acpi_bus_can_wakeup(handle) : false; |
110 | } | 110 | } |
111 | 111 | ||
112 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) | ||
113 | { | ||
114 | while (bus->parent) { | ||
115 | struct pci_dev *bridge = bus->self; | ||
116 | int ret; | ||
117 | |||
118 | ret = acpi_pm_device_sleep_wake(&bridge->dev, enable); | ||
119 | if (!ret || bridge->is_pcie) | ||
120 | return; | ||
121 | bus = bus->parent; | ||
122 | } | ||
123 | |||
124 | /* We have reached the root bus. */ | ||
125 | if (bus->bridge) | ||
126 | acpi_pm_device_sleep_wake(bus->bridge, enable); | ||
127 | } | ||
128 | |||
112 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | 129 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) |
113 | { | 130 | { |
114 | int error = acpi_pm_device_sleep_wake(&dev->dev, enable); | 131 | if (acpi_pci_can_wakeup(dev)) |
132 | return acpi_pm_device_sleep_wake(&dev->dev, enable); | ||
115 | 133 | ||
116 | if (!error) | 134 | if (!dev->is_pcie) |
117 | dev_printk(KERN_INFO, &dev->dev, | 135 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); |
118 | "wake-up capability %s by ACPI\n", | 136 | |
119 | enable ? "enabled" : "disabled"); | 137 | return 0; |
120 | return error; | ||
121 | } | 138 | } |
122 | 139 | ||
123 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 140 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a7eb7277b106..e5d47be3c6d7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -19,37 +19,98 @@ | |||
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include "pci.h" | 20 | #include "pci.h" |
21 | 21 | ||
22 | /* | ||
23 | * Dynamic device IDs are disabled for !CONFIG_HOTPLUG | ||
24 | */ | ||
25 | |||
26 | struct pci_dynid { | 22 | struct pci_dynid { |
27 | struct list_head node; | 23 | struct list_head node; |
28 | struct pci_device_id id; | 24 | struct pci_device_id id; |
29 | }; | 25 | }; |
30 | 26 | ||
31 | #ifdef CONFIG_HOTPLUG | 27 | /** |
28 | * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices | ||
29 | * @drv: target pci driver | ||
30 | * @vendor: PCI vendor ID | ||
31 | * @device: PCI device ID | ||
32 | * @subvendor: PCI subvendor ID | ||
33 | * @subdevice: PCI subdevice ID | ||
34 | * @class: PCI class | ||
35 | * @class_mask: PCI class mask | ||
36 | * @driver_data: private driver data | ||
37 | * | ||
38 | * Adds a new dynamic pci device ID to this driver and causes the | ||
39 | * driver to probe for all devices again. @drv must have been | ||
40 | * registered prior to calling this function. | ||
41 | * | ||
42 | * CONTEXT: | ||
43 | * Does GFP_KERNEL allocation. | ||
44 | * | ||
45 | * RETURNS: | ||
46 | * 0 on success, -errno on failure. | ||
47 | */ | ||
48 | int pci_add_dynid(struct pci_driver *drv, | ||
49 | unsigned int vendor, unsigned int device, | ||
50 | unsigned int subvendor, unsigned int subdevice, | ||
51 | unsigned int class, unsigned int class_mask, | ||
52 | unsigned long driver_data) | ||
53 | { | ||
54 | struct pci_dynid *dynid; | ||
55 | int retval; | ||
56 | |||
57 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | ||
58 | if (!dynid) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | dynid->id.vendor = vendor; | ||
62 | dynid->id.device = device; | ||
63 | dynid->id.subvendor = subvendor; | ||
64 | dynid->id.subdevice = subdevice; | ||
65 | dynid->id.class = class; | ||
66 | dynid->id.class_mask = class_mask; | ||
67 | dynid->id.driver_data = driver_data; | ||
68 | |||
69 | spin_lock(&drv->dynids.lock); | ||
70 | list_add_tail(&dynid->node, &drv->dynids.list); | ||
71 | spin_unlock(&drv->dynids.lock); | ||
72 | |||
73 | get_driver(&drv->driver); | ||
74 | retval = driver_attach(&drv->driver); | ||
75 | put_driver(&drv->driver); | ||
76 | |||
77 | return retval; | ||
78 | } | ||
79 | |||
80 | static void pci_free_dynids(struct pci_driver *drv) | ||
81 | { | ||
82 | struct pci_dynid *dynid, *n; | ||
32 | 83 | ||
84 | spin_lock(&drv->dynids.lock); | ||
85 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { | ||
86 | list_del(&dynid->node); | ||
87 | kfree(dynid); | ||
88 | } | ||
89 | spin_unlock(&drv->dynids.lock); | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Dynamic device ID manipulation via sysfs is disabled for !CONFIG_HOTPLUG | ||
94 | */ | ||
95 | #ifdef CONFIG_HOTPLUG | ||
33 | /** | 96 | /** |
34 | * store_new_id - add a new PCI device ID to this driver and re-probe devices | 97 | * store_new_id - sysfs frontend to pci_add_dynid() |
35 | * @driver: target device driver | 98 | * @driver: target device driver |
36 | * @buf: buffer for scanning device ID data | 99 | * @buf: buffer for scanning device ID data |
37 | * @count: input size | 100 | * @count: input size |
38 | * | 101 | * |
39 | * Adds a new dynamic pci device ID to this driver, | 102 | * Allow PCI IDs to be added to an existing driver via sysfs. |
40 | * and causes the driver to probe for all devices again. | ||
41 | */ | 103 | */ |
42 | static ssize_t | 104 | static ssize_t |
43 | store_new_id(struct device_driver *driver, const char *buf, size_t count) | 105 | store_new_id(struct device_driver *driver, const char *buf, size_t count) |
44 | { | 106 | { |
45 | struct pci_dynid *dynid; | ||
46 | struct pci_driver *pdrv = to_pci_driver(driver); | 107 | struct pci_driver *pdrv = to_pci_driver(driver); |
47 | const struct pci_device_id *ids = pdrv->id_table; | 108 | const struct pci_device_id *ids = pdrv->id_table; |
48 | __u32 vendor, device, subvendor=PCI_ANY_ID, | 109 | __u32 vendor, device, subvendor=PCI_ANY_ID, |
49 | subdevice=PCI_ANY_ID, class=0, class_mask=0; | 110 | subdevice=PCI_ANY_ID, class=0, class_mask=0; |
50 | unsigned long driver_data=0; | 111 | unsigned long driver_data=0; |
51 | int fields=0; | 112 | int fields=0; |
52 | int retval=0; | 113 | int retval; |
53 | 114 | ||
54 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", | 115 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", |
55 | &vendor, &device, &subvendor, &subdevice, | 116 | &vendor, &device, &subvendor, &subdevice, |
@@ -72,27 +133,8 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) | |||
72 | return retval; | 133 | return retval; |
73 | } | 134 | } |
74 | 135 | ||
75 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | 136 | retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice, |
76 | if (!dynid) | 137 | class, class_mask, driver_data); |
77 | return -ENOMEM; | ||
78 | |||
79 | dynid->id.vendor = vendor; | ||
80 | dynid->id.device = device; | ||
81 | dynid->id.subvendor = subvendor; | ||
82 | dynid->id.subdevice = subdevice; | ||
83 | dynid->id.class = class; | ||
84 | dynid->id.class_mask = class_mask; | ||
85 | dynid->id.driver_data = driver_data; | ||
86 | |||
87 | spin_lock(&pdrv->dynids.lock); | ||
88 | list_add_tail(&dynid->node, &pdrv->dynids.list); | ||
89 | spin_unlock(&pdrv->dynids.lock); | ||
90 | |||
91 | if (get_driver(&pdrv->driver)) { | ||
92 | retval = driver_attach(&pdrv->driver); | ||
93 | put_driver(&pdrv->driver); | ||
94 | } | ||
95 | |||
96 | if (retval) | 138 | if (retval) |
97 | return retval; | 139 | return retval; |
98 | return count; | 140 | return count; |
@@ -145,19 +187,6 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count) | |||
145 | } | 187 | } |
146 | static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id); | 188 | static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id); |
147 | 189 | ||
148 | static void | ||
149 | pci_free_dynids(struct pci_driver *drv) | ||
150 | { | ||
151 | struct pci_dynid *dynid, *n; | ||
152 | |||
153 | spin_lock(&drv->dynids.lock); | ||
154 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { | ||
155 | list_del(&dynid->node); | ||
156 | kfree(dynid); | ||
157 | } | ||
158 | spin_unlock(&drv->dynids.lock); | ||
159 | } | ||
160 | |||
161 | static int | 190 | static int |
162 | pci_create_newid_file(struct pci_driver *drv) | 191 | pci_create_newid_file(struct pci_driver *drv) |
163 | { | 192 | { |
@@ -186,7 +215,6 @@ static void pci_remove_removeid_file(struct pci_driver *drv) | |||
186 | driver_remove_file(&drv->driver, &driver_attr_remove_id); | 215 | driver_remove_file(&drv->driver, &driver_attr_remove_id); |
187 | } | 216 | } |
188 | #else /* !CONFIG_HOTPLUG */ | 217 | #else /* !CONFIG_HOTPLUG */ |
189 | static inline void pci_free_dynids(struct pci_driver *drv) {} | ||
190 | static inline int pci_create_newid_file(struct pci_driver *drv) | 218 | static inline int pci_create_newid_file(struct pci_driver *drv) |
191 | { | 219 | { |
192 | return 0; | 220 | return 0; |
@@ -417,8 +445,6 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) | |||
417 | struct pci_dev * pci_dev = to_pci_dev(dev); | 445 | struct pci_dev * pci_dev = to_pci_dev(dev); |
418 | struct pci_driver * drv = pci_dev->driver; | 446 | struct pci_driver * drv = pci_dev->driver; |
419 | 447 | ||
420 | pci_dev->state_saved = false; | ||
421 | |||
422 | if (drv && drv->suspend) { | 448 | if (drv && drv->suspend) { |
423 | pci_power_t prev = pci_dev->current_state; | 449 | pci_power_t prev = pci_dev->current_state; |
424 | int error; | 450 | int error; |
@@ -514,7 +540,6 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) | |||
514 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | 540 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) |
515 | { | 541 | { |
516 | pci_restore_standard_config(pci_dev); | 542 | pci_restore_standard_config(pci_dev); |
517 | pci_dev->state_saved = false; | ||
518 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 543 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
519 | } | 544 | } |
520 | 545 | ||
@@ -580,8 +605,6 @@ static int pci_pm_suspend(struct device *dev) | |||
580 | if (pci_has_legacy_pm_support(pci_dev)) | 605 | if (pci_has_legacy_pm_support(pci_dev)) |
581 | return pci_legacy_suspend(dev, PMSG_SUSPEND); | 606 | return pci_legacy_suspend(dev, PMSG_SUSPEND); |
582 | 607 | ||
583 | pci_dev->state_saved = false; | ||
584 | |||
585 | if (!pm) { | 608 | if (!pm) { |
586 | pci_pm_default_suspend(pci_dev); | 609 | pci_pm_default_suspend(pci_dev); |
587 | goto Fixup; | 610 | goto Fixup; |
@@ -694,7 +717,7 @@ static int pci_pm_resume(struct device *dev) | |||
694 | pci_pm_reenable_device(pci_dev); | 717 | pci_pm_reenable_device(pci_dev); |
695 | } | 718 | } |
696 | 719 | ||
697 | return 0; | 720 | return error; |
698 | } | 721 | } |
699 | 722 | ||
700 | #else /* !CONFIG_SUSPEND */ | 723 | #else /* !CONFIG_SUSPEND */ |
@@ -716,8 +739,6 @@ static int pci_pm_freeze(struct device *dev) | |||
716 | if (pci_has_legacy_pm_support(pci_dev)) | 739 | if (pci_has_legacy_pm_support(pci_dev)) |
717 | return pci_legacy_suspend(dev, PMSG_FREEZE); | 740 | return pci_legacy_suspend(dev, PMSG_FREEZE); |
718 | 741 | ||
719 | pci_dev->state_saved = false; | ||
720 | |||
721 | if (!pm) { | 742 | if (!pm) { |
722 | pci_pm_default_suspend(pci_dev); | 743 | pci_pm_default_suspend(pci_dev); |
723 | return 0; | 744 | return 0; |
@@ -793,6 +814,8 @@ static int pci_pm_thaw(struct device *dev) | |||
793 | pci_pm_reenable_device(pci_dev); | 814 | pci_pm_reenable_device(pci_dev); |
794 | } | 815 | } |
795 | 816 | ||
817 | pci_dev->state_saved = false; | ||
818 | |||
796 | return error; | 819 | return error; |
797 | } | 820 | } |
798 | 821 | ||
@@ -804,8 +827,6 @@ static int pci_pm_poweroff(struct device *dev) | |||
804 | if (pci_has_legacy_pm_support(pci_dev)) | 827 | if (pci_has_legacy_pm_support(pci_dev)) |
805 | return pci_legacy_suspend(dev, PMSG_HIBERNATE); | 828 | return pci_legacy_suspend(dev, PMSG_HIBERNATE); |
806 | 829 | ||
807 | pci_dev->state_saved = false; | ||
808 | |||
809 | if (!pm) { | 830 | if (!pm) { |
810 | pci_pm_default_suspend(pci_dev); | 831 | pci_pm_default_suspend(pci_dev); |
811 | goto Fixup; | 832 | goto Fixup; |
@@ -1106,6 +1127,7 @@ static int __init pci_driver_init(void) | |||
1106 | 1127 | ||
1107 | postcore_initcall(pci_driver_init); | 1128 | postcore_initcall(pci_driver_init); |
1108 | 1129 | ||
1130 | EXPORT_SYMBOL_GPL(pci_add_dynid); | ||
1109 | EXPORT_SYMBOL(pci_match_id); | 1131 | EXPORT_SYMBOL(pci_match_id); |
1110 | EXPORT_SYMBOL(__pci_register_driver); | 1132 | EXPORT_SYMBOL(__pci_register_driver); |
1111 | EXPORT_SYMBOL(pci_unregister_driver); | 1133 | EXPORT_SYMBOL(pci_unregister_driver); |
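pci_add_dynid() gives kernel code the same facility that the new_id sysfs file (store_new_id() above) exposes to user space: append a device ID to an already registered pci_driver and re-run probing. A hedged sketch, where my_driver is a hypothetical, already registered struct pci_driver and the IDs are placeholders:

        int err;

        err = pci_add_dynid(&my_driver, 0x1234, 0x5678,    /* vendor, device */
                            PCI_ANY_ID, PCI_ANY_ID,        /* subvendor, subdevice */
                            0, 0,                          /* class, class_mask */
                            0);                            /* driver_data */
        if (err)
                printk(KERN_WARNING "my_driver: failed to add dynamic id (%d)\n", err);

From user space the equivalent remains a write such as echo "1234 5678" > /sys/bus/pci/drivers/my_driver/new_id.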
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 74fbec0bf6cb..f7b68ca6cc98 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c | |||
@@ -19,8 +19,16 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | 21 | ||
22 | static char ids[1024] __initdata; | ||
23 | |||
24 | module_param_string(ids, ids, sizeof(ids), 0); | ||
25 | MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is " | ||
26 | "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\"" | ||
27 | " and multiple comma separated entries can be specified"); | ||
28 | |||
22 | static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) | 29 | static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) |
23 | { | 30 | { |
31 | dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n"); | ||
24 | return 0; | 32 | return 0; |
25 | } | 33 | } |
26 | 34 | ||
@@ -32,7 +40,42 @@ static struct pci_driver stub_driver = { | |||
32 | 40 | ||
33 | static int __init pci_stub_init(void) | 41 | static int __init pci_stub_init(void) |
34 | { | 42 | { |
35 | return pci_register_driver(&stub_driver); | 43 | char *p, *id; |
44 | int rc; | ||
45 | |||
46 | rc = pci_register_driver(&stub_driver); | ||
47 | if (rc) | ||
48 | return rc; | ||
49 | |||
50 | /* add ids specified in the module parameter */ | ||
51 | p = ids; | ||
52 | while ((id = strsep(&p, ","))) { | ||
53 | unsigned int vendor, device, subvendor = PCI_ANY_ID, | ||
54 | subdevice = PCI_ANY_ID, class=0, class_mask=0; | ||
55 | int fields; | ||
56 | |||
57 | fields = sscanf(id, "%x:%x:%x:%x:%x:%x", | ||
58 | &vendor, &device, &subvendor, &subdevice, | ||
59 | &class, &class_mask); | ||
60 | |||
61 | if (fields < 2) { | ||
62 | printk(KERN_WARNING | ||
63 | "pci-stub: invalid id string \"%s\"\n", id); | ||
64 | continue; | ||
65 | } | ||
66 | |||
67 | printk(KERN_INFO | ||
68 | "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", | ||
69 | vendor, device, subvendor, subdevice, class, class_mask); | ||
70 | |||
71 | rc = pci_add_dynid(&stub_driver, vendor, device, | ||
72 | subvendor, subdevice, class, class_mask, 0); | ||
73 | if (rc) | ||
74 | printk(KERN_WARNING | ||
75 | "pci-stub: failed to add dynamic id (%d)\n", rc); | ||
76 | } | ||
77 | |||
78 | return 0; | ||
36 | } | 79 | } |
37 | 80 | ||
38 | static void __exit pci_stub_exit(void) | 81 | static void __exit pci_stub_exit(void) |
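With the new ids parameter the stub driver can claim devices as soon as it loads instead of waiting for a write to its new_id file; for example (IDs purely illustrative), pci-stub.ids=8086:10f5,1af4:1000 on the kernel command line, or modprobe pci-stub ids=8086:10f5,1af4:1000, binds pci-stub to those two vendor:device pairs, which is the usual first step before handing a function to a guest via device assignment.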
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 85ebd02a64a7..0f6382f090ee 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -916,6 +916,24 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev) | |||
916 | return 0; | 916 | return 0; |
917 | } | 917 | } |
918 | 918 | ||
919 | static ssize_t reset_store(struct device *dev, | ||
920 | struct device_attribute *attr, const char *buf, | ||
921 | size_t count) | ||
922 | { | ||
923 | struct pci_dev *pdev = to_pci_dev(dev); | ||
924 | unsigned long val; | ||
925 | ssize_t result = strict_strtoul(buf, 0, &val); | ||
926 | |||
927 | if (result < 0) | ||
928 | return result; | ||
929 | |||
930 | if (val != 1) | ||
931 | return -EINVAL; | ||
932 | return pci_reset_function(pdev); | ||
933 | } | ||
934 | |||
935 | static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); | ||
936 | |||
919 | static int pci_create_capabilities_sysfs(struct pci_dev *dev) | 937 | static int pci_create_capabilities_sysfs(struct pci_dev *dev) |
920 | { | 938 | { |
921 | int retval; | 939 | int retval; |
@@ -943,7 +961,22 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) | |||
943 | /* Active State Power Management */ | 961 | /* Active State Power Management */ |
944 | pcie_aspm_create_sysfs_dev_files(dev); | 962 | pcie_aspm_create_sysfs_dev_files(dev); |
945 | 963 | ||
964 | if (!pci_probe_reset_function(dev)) { | ||
965 | retval = device_create_file(&dev->dev, &reset_attr); | ||
966 | if (retval) | ||
967 | goto error; | ||
968 | dev->reset_fn = 1; | ||
969 | } | ||
946 | return 0; | 970 | return 0; |
971 | |||
972 | error: | ||
973 | pcie_aspm_remove_sysfs_dev_files(dev); | ||
974 | if (dev->vpd && dev->vpd->attr) { | ||
975 | sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr); | ||
976 | kfree(dev->vpd->attr); | ||
977 | } | ||
978 | |||
979 | return retval; | ||
947 | } | 980 | } |
948 | 981 | ||
949 | int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) | 982 | int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) |
@@ -1037,6 +1070,10 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev) | |||
1037 | } | 1070 | } |
1038 | 1071 | ||
1039 | pcie_aspm_remove_sysfs_dev_files(dev); | 1072 | pcie_aspm_remove_sysfs_dev_files(dev); |
1073 | if (dev->reset_fn) { | ||
1074 | device_remove_file(&dev->dev, &reset_attr); | ||
1075 | dev->reset_fn = 0; | ||
1076 | } | ||
1040 | } | 1077 | } |
1041 | 1078 | ||
1042 | /** | 1079 | /** |
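The reset attribute is only created when pci_probe_reset_function() reports that the function can be reset in isolation, so its presence under the device's sysfs directory doubles as a capability indicator, and writing 1 to it ends up in pci_reset_function(). In-kernel users can call the same helper directly; a minimal sketch, with pdev assumed to be a struct pci_dev the caller owns:

        /* Saves config state, resets just this function, restores state. */
        if (pci_reset_function(pdev))
                dev_warn(&pdev->dev, "function-level reset failed\n");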
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7b70312181d7..6edecff0b419 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -41,6 +41,12 @@ int pci_domains_supported = 1; | |||
41 | unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; | 41 | unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; |
42 | unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | 42 | unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; |
43 | 43 | ||
44 | #define DEFAULT_HOTPLUG_IO_SIZE (256) | ||
45 | #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024) | ||
46 | /* pci=hpmemsize=nnM,hpiosize=nn can override this */ | ||
47 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | ||
48 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | ||
49 | |||
44 | /** | 50 | /** |
45 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children | 51 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children |
46 | * @bus: pointer to PCI bus structure to search | 52 | * @bus: pointer to PCI bus structure to search |
@@ -848,6 +854,7 @@ pci_restore_state(struct pci_dev *dev) | |||
848 | 854 | ||
849 | if (!dev->state_saved) | 855 | if (!dev->state_saved) |
850 | return 0; | 856 | return 0; |
857 | |||
851 | /* PCI Express register must be restored first */ | 858 | /* PCI Express register must be restored first */ |
852 | pci_restore_pcie_state(dev); | 859 | pci_restore_pcie_state(dev); |
853 | 860 | ||
@@ -869,6 +876,8 @@ pci_restore_state(struct pci_dev *dev) | |||
869 | pci_restore_msi_state(dev); | 876 | pci_restore_msi_state(dev); |
870 | pci_restore_iov_state(dev); | 877 | pci_restore_iov_state(dev); |
871 | 878 | ||
879 | dev->state_saved = false; | ||
880 | |||
872 | return 0; | 881 | return 0; |
873 | } | 882 | } |
874 | 883 | ||
@@ -1214,30 +1223,40 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1214 | */ | 1223 | */ |
1215 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1224 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) |
1216 | { | 1225 | { |
1217 | int error = 0; | 1226 | int ret = 0; |
1218 | bool pme_done = false; | ||
1219 | 1227 | ||
1220 | if (enable && !device_may_wakeup(&dev->dev)) | 1228 | if (enable && !device_may_wakeup(&dev->dev)) |
1221 | return -EINVAL; | 1229 | return -EINVAL; |
1222 | 1230 | ||
1231 | /* Don't do the same thing twice in a row for one device. */ | ||
1232 | if (!!enable == !!dev->wakeup_prepared) | ||
1233 | return 0; | ||
1234 | |||
1223 | /* | 1235 | /* |
1224 | * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don | 1236 | * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don |
1225 | * Anderson we should be doing PME# wake enable followed by ACPI wake | 1237 | * Anderson we should be doing PME# wake enable followed by ACPI wake |
1226 | * enable. To disable wake-up we call the platform first, for symmetry. | 1238 | * enable. To disable wake-up we call the platform first, for symmetry. |
1227 | */ | 1239 | */ |
1228 | 1240 | ||
1229 | if (!enable && platform_pci_can_wakeup(dev)) | 1241 | if (enable) { |
1230 | error = platform_pci_sleep_wake(dev, false); | 1242 | int error; |
1231 | |||
1232 | if (!enable || pci_pme_capable(dev, state)) { | ||
1233 | pci_pme_active(dev, enable); | ||
1234 | pme_done = true; | ||
1235 | } | ||
1236 | 1243 | ||
1237 | if (enable && platform_pci_can_wakeup(dev)) | 1244 | if (pci_pme_capable(dev, state)) |
1245 | pci_pme_active(dev, true); | ||
1246 | else | ||
1247 | ret = 1; | ||
1238 | error = platform_pci_sleep_wake(dev, true); | 1248 | error = platform_pci_sleep_wake(dev, true); |
1249 | if (ret) | ||
1250 | ret = error; | ||
1251 | if (!ret) | ||
1252 | dev->wakeup_prepared = true; | ||
1253 | } else { | ||
1254 | platform_pci_sleep_wake(dev, false); | ||
1255 | pci_pme_active(dev, false); | ||
1256 | dev->wakeup_prepared = false; | ||
1257 | } | ||
1239 | 1258 | ||
1240 | return pme_done ? 0 : error; | 1259 | return ret; |
1241 | } | 1260 | } |
1242 | 1261 | ||
1243 | /** | 1262 | /** |
@@ -1356,6 +1375,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
1356 | int pm; | 1375 | int pm; |
1357 | u16 pmc; | 1376 | u16 pmc; |
1358 | 1377 | ||
1378 | dev->wakeup_prepared = false; | ||
1359 | dev->pm_cap = 0; | 1379 | dev->pm_cap = 0; |
1360 | 1380 | ||
1361 | /* find PCI PM capability in list */ | 1381 | /* find PCI PM capability in list */ |
@@ -2262,6 +2282,22 @@ int __pci_reset_function(struct pci_dev *dev) | |||
2262 | EXPORT_SYMBOL_GPL(__pci_reset_function); | 2282 | EXPORT_SYMBOL_GPL(__pci_reset_function); |
2263 | 2283 | ||
2264 | /** | 2284 | /** |
2285 | * pci_probe_reset_function - check whether the device can be safely reset | ||
2286 | * @dev: PCI device to reset | ||
2287 | * | ||
2288 | * Some devices allow an individual function to be reset without affecting | ||
2289 | * other functions in the same device. The PCI device must be responsive | ||
2290 | * to PCI config space in order to use this function. | ||
2291 | * | ||
2292 | * Returns 0 if the device function can be reset or negative if the | ||
2293 | * device doesn't support resetting a single function. | ||
2294 | */ | ||
2295 | int pci_probe_reset_function(struct pci_dev *dev) | ||
2296 | { | ||
2297 | return pci_dev_reset(dev, 1); | ||
2298 | } | ||
2299 | |||
2300 | /** | ||
2265 | * pci_reset_function - quiesce and reset a PCI device function | 2301 | * pci_reset_function - quiesce and reset a PCI device function |
2266 | * @dev: PCI device to reset | 2302 | * @dev: PCI device to reset |
2267 | * | 2303 | * |
@@ -2504,6 +2540,50 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | |||
2504 | return 0; | 2540 | return 0; |
2505 | } | 2541 | } |
2506 | 2542 | ||
2543 | /** | ||
2544 | * pci_set_vga_state - set VGA decode state on device and parents if requested | ||
2545 | * @dev the PCI device | ||
2546 | * @decode - true = enable decoding, false = disable decoding | ||
2547 | * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY | ||
2548 | * @change_bridge - traverse ancestors and change bridges | ||
2549 | */ | ||
2550 | int pci_set_vga_state(struct pci_dev *dev, bool decode, | ||
2551 | unsigned int command_bits, bool change_bridge) | ||
2552 | { | ||
2553 | struct pci_bus *bus; | ||
2554 | struct pci_dev *bridge; | ||
2555 | u16 cmd; | ||
2556 | |||
2557 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); | ||
2558 | |||
2559 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
2560 | if (decode == true) | ||
2561 | cmd |= command_bits; | ||
2562 | else | ||
2563 | cmd &= ~command_bits; | ||
2564 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
2565 | |||
2566 | if (change_bridge == false) | ||
2567 | return 0; | ||
2568 | |||
2569 | bus = dev->bus; | ||
2570 | while (bus) { | ||
2571 | bridge = bus->self; | ||
2572 | if (bridge) { | ||
2573 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, | ||
2574 | &cmd); | ||
2575 | if (decode == true) | ||
2576 | cmd |= PCI_BRIDGE_CTL_VGA; | ||
2577 | else | ||
2578 | cmd &= ~PCI_BRIDGE_CTL_VGA; | ||
2579 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, | ||
2580 | cmd); | ||
2581 | } | ||
2582 | bus = bus->parent; | ||
2583 | } | ||
2584 | return 0; | ||
2585 | } | ||
2586 | |||
2507 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE | 2587 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE |
2508 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; | 2588 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; |
2509 | spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; | 2589 | spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; |
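pci_set_vga_state() is aimed at VGA arbitration between multiple graphics devices: a driver can flip legacy VGA I/O and memory decoding for its own function and, when change_bridge is set, the VGA-enable bit in every bridge above it. A hedged example of disabling decoding for a secondary card, with pdev illustrative:

        /* Stop this GPU from decoding legacy VGA ranges and clear
         * PCI_BRIDGE_CTL_VGA in all upstream bridges as well. */
        pci_set_vga_state(pdev, false,
                          PCI_COMMAND_IO | PCI_COMMAND_MEMORY, true);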
@@ -2672,6 +2752,10 @@ static int __init pci_setup(char *str) | |||
2672 | strlen(str + 19)); | 2752 | strlen(str + 19)); |
2673 | } else if (!strncmp(str, "ecrc=", 5)) { | 2753 | } else if (!strncmp(str, "ecrc=", 5)) { |
2674 | pcie_ecrc_get_policy(str + 5); | 2754 | pcie_ecrc_get_policy(str + 5); |
2755 | } else if (!strncmp(str, "hpiosize=", 9)) { | ||
2756 | pci_hotplug_io_size = memparse(str + 9, &str); | ||
2757 | } else if (!strncmp(str, "hpmemsize=", 10)) { | ||
2758 | pci_hotplug_mem_size = memparse(str + 10, &str); | ||
2675 | } else { | 2759 | } else { |
2676 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 2760 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
2677 | str); | 2761 | str); |
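The two new pci_setup() branches give the hotplug sizing defaults declared earlier in this file a boot-time override; for instance (values illustrative), booting with pci=hpmemsize=8M,hpiosize=1K reserves 8 MB of memory space and 1 KB of I/O space behind each hotplug bridge instead of the default 2 MB and 256 bytes.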
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5ff4d25bf0e9..d92d1954a2fb 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -16,6 +16,7 @@ extern void pci_cleanup_rom(struct pci_dev *dev); | |||
16 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | 16 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, |
17 | struct vm_area_struct *vma); | 17 | struct vm_area_struct *vma); |
18 | #endif | 18 | #endif |
19 | int pci_probe_reset_function(struct pci_dev *dev); | ||
19 | 20 | ||
20 | /** | 21 | /** |
21 | * struct pci_platform_pm_ops - Firmware PM callbacks | 22 | * struct pci_platform_pm_ops - Firmware PM callbacks |
@@ -133,7 +134,6 @@ static inline int pci_no_d1d2(struct pci_dev *dev) | |||
133 | return (dev->no_d1d2 || parent_dstates); | 134 | return (dev->no_d1d2 || parent_dstates); |
134 | 135 | ||
135 | } | 136 | } |
136 | extern int pcie_mch_quirk; | ||
137 | extern struct device_attribute pci_dev_attrs[]; | 137 | extern struct device_attribute pci_dev_attrs[]; |
138 | extern struct device_attribute dev_attr_cpuaffinity; | 138 | extern struct device_attribute dev_attr_cpuaffinity; |
139 | extern struct device_attribute dev_attr_cpulistaffinity; | 139 | extern struct device_attribute dev_attr_cpulistaffinity; |
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index d92ae21a59d8..62d15f652bb6 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
@@ -22,11 +22,10 @@ | |||
22 | #include <linux/miscdevice.h> | 22 | #include <linux/miscdevice.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <asm/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include "aerdrv.h" | 26 | #include "aerdrv.h" |
27 | 27 | ||
28 | struct aer_error_inj | 28 | struct aer_error_inj { |
29 | { | ||
30 | u8 bus; | 29 | u8 bus; |
31 | u8 dev; | 30 | u8 dev; |
32 | u8 fn; | 31 | u8 fn; |
@@ -38,8 +37,7 @@ struct aer_error_inj | |||
38 | u32 header_log3; | 37 | u32 header_log3; |
39 | }; | 38 | }; |
40 | 39 | ||
41 | struct aer_error | 40 | struct aer_error { |
42 | { | ||
43 | struct list_head list; | 41 | struct list_head list; |
44 | unsigned int bus; | 42 | unsigned int bus; |
45 | unsigned int devfn; | 43 | unsigned int devfn; |
@@ -55,8 +53,7 @@ struct aer_error | |||
55 | u32 source_id; | 53 | u32 source_id; |
56 | }; | 54 | }; |
57 | 55 | ||
58 | struct pci_bus_ops | 56 | struct pci_bus_ops { |
59 | { | ||
60 | struct list_head list; | 57 | struct list_head list; |
61 | struct pci_bus *bus; | 58 | struct pci_bus *bus; |
62 | struct pci_ops *ops; | 59 | struct pci_ops *ops; |
@@ -150,7 +147,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where, | |||
150 | target = &err->header_log1; | 147 | target = &err->header_log1; |
151 | break; | 148 | break; |
152 | case PCI_ERR_HEADER_LOG+8: | 149 | case PCI_ERR_HEADER_LOG+8: |
153 | target = &err->header_log2; | 150 | target = &err->header_log2; |
154 | break; | 151 | break; |
155 | case PCI_ERR_HEADER_LOG+12: | 152 | case PCI_ERR_HEADER_LOG+12: |
156 | target = &err->header_log3; | 153 | target = &err->header_log3; |
@@ -258,8 +255,7 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus) | |||
258 | bus_ops = NULL; | 255 | bus_ops = NULL; |
259 | out: | 256 | out: |
260 | spin_unlock_irqrestore(&inject_lock, flags); | 257 | spin_unlock_irqrestore(&inject_lock, flags); |
261 | if (bus_ops) | 258 | kfree(bus_ops); |
262 | kfree(bus_ops); | ||
263 | return 0; | 259 | return 0; |
264 | } | 260 | } |
265 | 261 | ||
@@ -401,10 +397,8 @@ static int aer_inject(struct aer_error_inj *einj) | |||
401 | else | 397 | else |
402 | ret = -EINVAL; | 398 | ret = -EINVAL; |
403 | out_put: | 399 | out_put: |
404 | if (err_alloc) | 400 | kfree(err_alloc); |
405 | kfree(err_alloc); | 401 | kfree(rperr_alloc); |
406 | if (rperr_alloc) | ||
407 | kfree(rperr_alloc); | ||
408 | pci_dev_put(dev); | 402 | pci_dev_put(dev); |
409 | return ret; | 403 | return ret; |
410 | } | 404 | } |
@@ -458,8 +452,7 @@ static void __exit aer_inject_exit(void) | |||
458 | } | 452 | } |
459 | 453 | ||
460 | spin_lock_irqsave(&inject_lock, flags); | 454 | spin_lock_irqsave(&inject_lock, flags); |
461 | list_for_each_entry_safe(err, err_next, | 455 | list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) { |
462 | &pci_bus_ops_list, list) { | ||
463 | list_del(&err->list); | 456 | list_del(&err->list); |
464 | kfree(err); | 457 | kfree(err); |
465 | } | 458 | } |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 4770f13b3ca1..10c0e62bd5a8 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -38,7 +38,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
38 | MODULE_DESCRIPTION(DRIVER_DESC); | 38 | MODULE_DESCRIPTION(DRIVER_DESC); |
39 | MODULE_LICENSE("GPL"); | 39 | MODULE_LICENSE("GPL"); |
40 | 40 | ||
41 | static int __devinit aer_probe (struct pcie_device *dev); | 41 | static int __devinit aer_probe(struct pcie_device *dev); |
42 | static void aer_remove(struct pcie_device *dev); | 42 | static void aer_remove(struct pcie_device *dev); |
43 | static pci_ers_result_t aer_error_detected(struct pci_dev *dev, | 43 | static pci_ers_result_t aer_error_detected(struct pci_dev *dev, |
44 | enum pci_channel_state error); | 44 | enum pci_channel_state error); |
@@ -47,7 +47,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev); | |||
47 | 47 | ||
48 | static struct pci_error_handlers aer_error_handlers = { | 48 | static struct pci_error_handlers aer_error_handlers = { |
49 | .error_detected = aer_error_detected, | 49 | .error_detected = aer_error_detected, |
50 | .resume = aer_error_resume, | 50 | .resume = aer_error_resume, |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static struct pcie_port_service_driver aerdriver = { | 53 | static struct pcie_port_service_driver aerdriver = { |
@@ -134,12 +134,12 @@ EXPORT_SYMBOL_GPL(aer_irq); | |||
134 | * | 134 | * |
135 | * Invoked when Root Port's AER service is loaded. | 135 | * Invoked when Root Port's AER service is loaded. |
136 | **/ | 136 | **/ |
137 | static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev) | 137 | static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) |
138 | { | 138 | { |
139 | struct aer_rpc *rpc; | 139 | struct aer_rpc *rpc; |
140 | 140 | ||
141 | if (!(rpc = kzalloc(sizeof(struct aer_rpc), | 141 | rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL); |
142 | GFP_KERNEL))) | 142 | if (!rpc) |
143 | return NULL; | 143 | return NULL; |
144 | 144 | ||
145 | /* | 145 | /* |
@@ -189,26 +189,28 @@ static void aer_remove(struct pcie_device *dev) | |||
189 | * | 189 | * |
190 | * Invoked when PCI Express bus loads AER service driver. | 190 | * Invoked when PCI Express bus loads AER service driver. |
191 | **/ | 191 | **/ |
192 | static int __devinit aer_probe (struct pcie_device *dev) | 192 | static int __devinit aer_probe(struct pcie_device *dev) |
193 | { | 193 | { |
194 | int status; | 194 | int status; |
195 | struct aer_rpc *rpc; | 195 | struct aer_rpc *rpc; |
196 | struct device *device = &dev->device; | 196 | struct device *device = &dev->device; |
197 | 197 | ||
198 | /* Init */ | 198 | /* Init */ |
199 | if ((status = aer_init(dev))) | 199 | status = aer_init(dev); |
200 | if (status) | ||
200 | return status; | 201 | return status; |
201 | 202 | ||
202 | /* Alloc rpc data structure */ | 203 | /* Alloc rpc data structure */ |
203 | if (!(rpc = aer_alloc_rpc(dev))) { | 204 | rpc = aer_alloc_rpc(dev); |
205 | if (!rpc) { | ||
204 | dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); | 206 | dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); |
205 | aer_remove(dev); | 207 | aer_remove(dev); |
206 | return -ENOMEM; | 208 | return -ENOMEM; |
207 | } | 209 | } |
208 | 210 | ||
209 | /* Request IRQ ISR */ | 211 | /* Request IRQ ISR */ |
210 | if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", | 212 | status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev); |
211 | dev))) { | 213 | if (status) { |
212 | dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); | 214 | dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); |
213 | aer_remove(dev); | 215 | aer_remove(dev); |
214 | return status; | 216 | return status; |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index bbd7428ca2d0..bd833ea3ba49 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
@@ -16,12 +16,9 @@ | |||
16 | #define AER_NONFATAL 0 | 16 | #define AER_NONFATAL 0 |
17 | #define AER_FATAL 1 | 17 | #define AER_FATAL 1 |
18 | #define AER_CORRECTABLE 2 | 18 | #define AER_CORRECTABLE 2 |
19 | #define AER_UNCORRECTABLE 4 | ||
20 | #define AER_ERROR_MASK 0x001fffff | ||
21 | #define AER_ERROR(d) (d & AER_ERROR_MASK) | ||
22 | 19 | ||
23 | /* Root Error Status Register Bits */ | 20 | /* Root Error Status Register Bits */ |
24 | #define ROOT_ERR_STATUS_MASKS 0x0f | 21 | #define ROOT_ERR_STATUS_MASKS 0x0f |
25 | 22 | ||
26 | #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ | 23 | #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \ |
27 | PCI_EXP_RTCTL_SENFEE| \ | 24 | PCI_EXP_RTCTL_SENFEE| \ |
@@ -32,8 +29,6 @@ | |||
32 | #define ERR_COR_ID(d) (d & 0xffff) | 29 | #define ERR_COR_ID(d) (d & 0xffff) |
33 | #define ERR_UNCOR_ID(d) (d >> 16) | 30 | #define ERR_UNCOR_ID(d) (d >> 16) |
34 | 31 | ||
35 | #define AER_SUCCESS 0 | ||
36 | #define AER_UNSUCCESS 1 | ||
37 | #define AER_ERROR_SOURCES_MAX 100 | 32 | #define AER_ERROR_SOURCES_MAX 100 |
38 | 33 | ||
39 | #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ | 34 | #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ |
@@ -43,13 +38,6 @@ | |||
43 | PCI_ERR_UNC_UNX_COMP| \ | 38 | PCI_ERR_UNC_UNX_COMP| \ |
44 | PCI_ERR_UNC_MALF_TLP) | 39 | PCI_ERR_UNC_MALF_TLP) |
45 | 40 | ||
46 | /* AER Error Info Flags */ | ||
47 | #define AER_TLP_HEADER_VALID_FLAG 0x00000001 | ||
48 | #define AER_MULTI_ERROR_VALID_FLAG 0x00000002 | ||
49 | |||
50 | #define ERR_CORRECTABLE_ERROR_MASK 0x000031c1 | ||
51 | #define ERR_UNCORRECTABLE_ERROR_MASK 0x001ff010 | ||
52 | |||
53 | struct header_log_regs { | 41 | struct header_log_regs { |
54 | unsigned int dw0; | 42 | unsigned int dw0; |
55 | unsigned int dw1; | 43 | unsigned int dw1; |
@@ -61,11 +49,20 @@ struct header_log_regs { | |||
61 | struct aer_err_info { | 49 | struct aer_err_info { |
62 | struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; | 50 | struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; |
63 | int error_dev_num; | 51 | int error_dev_num; |
64 | u16 id; | 52 | |
65 | int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ | 53 | unsigned int id:16; |
66 | int flags; | 54 | |
55 | unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */ | ||
56 | unsigned int __pad1:5; | ||
57 | unsigned int multi_error_valid:1; | ||
58 | |||
59 | unsigned int first_error:5; | ||
60 | unsigned int __pad2:2; | ||
61 | unsigned int tlp_header_valid:1; | ||
62 | |||
67 | unsigned int status; /* COR/UNCOR Error Status */ | 63 | unsigned int status; /* COR/UNCOR Error Status */ |
68 | struct header_log_regs tlp; /* TLP Header */ | 64 | unsigned int mask; /* COR/UNCOR Error Mask */ |
65 | struct header_log_regs tlp; /* TLP Header */ | ||
69 | }; | 66 | }; |
70 | 67 | ||
71 | struct aer_err_source { | 68 | struct aer_err_source { |
@@ -125,6 +122,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc); | |||
125 | extern int aer_init(struct pcie_device *dev); | 122 | extern int aer_init(struct pcie_device *dev); |
126 | extern void aer_isr(struct work_struct *work); | 123 | extern void aer_isr(struct work_struct *work); |
127 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); | 124 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); |
125 | extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info); | ||
128 | extern irqreturn_t aer_irq(int irq, void *context); | 126 | extern irqreturn_t aer_irq(int irq, void *context); |
129 | 127 | ||
130 | #ifdef CONFIG_ACPI | 128 | #ifdef CONFIG_ACPI |
@@ -136,4 +134,4 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) | |||
136 | } | 134 | } |
137 | #endif | 135 | #endif |
138 | 136 | ||
139 | #endif //_AERDRV_H_ | 137 | #endif /* _AERDRV_H_ */ |
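The aerdrv.h change above folds the old int flags field plus AER_*_VALID_FLAG tests into named bitfields (id, severity, multi_error_valid, first_error, tlp_header_valid) and pairs the status word with the corresponding mask register. A minimal userspace sketch of that layout, with the explicit __pad fields omitted; the struct below is illustrative only, not the kernel definition:

    #include <stdio.h>

    /* Illustrative stand-in for the reworked aer_err_info bitfields. */
    struct err_info {
        unsigned int id:16;              /* requester ID from the root port */
        unsigned int severity:2;         /* 0:NONFATAL | 1:FATAL | 2:COR */
        unsigned int multi_error_valid:1;
        unsigned int first_error:5;      /* First Error Pointer (bit index) */
        unsigned int tlp_header_valid:1;
        unsigned int status;             /* COR/UNCOR Error Status */
        unsigned int mask;               /* COR/UNCOR Error Mask */
    };

    int main(void)
    {
        struct err_info info = { 0 };

        info.id = 0x0010;
        info.severity = 2;               /* corrected error */
        info.multi_error_valid = 1;
        info.first_error = 6;            /* e.g. Bad TLP (bit 6) */

        printf("id=%04x severity=%u multi=%u first=%u\n",
               (unsigned)info.id, (unsigned)info.severity,
               (unsigned)info.multi_error_valid, (unsigned)info.first_error);
        return 0;
    }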
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 3d8872704a58..9f5ccbeb4fa5 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -49,10 +49,11 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) | |||
49 | PCI_EXP_DEVCTL_NFERE | | 49 | PCI_EXP_DEVCTL_NFERE | |
50 | PCI_EXP_DEVCTL_FERE | | 50 | PCI_EXP_DEVCTL_FERE | |
51 | PCI_EXP_DEVCTL_URRE; | 51 | PCI_EXP_DEVCTL_URRE; |
52 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, | 52 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16); |
53 | reg16); | 53 | |
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); | ||
56 | 57 | ||
57 | int pci_disable_pcie_error_reporting(struct pci_dev *dev) | 58 | int pci_disable_pcie_error_reporting(struct pci_dev *dev) |
58 | { | 59 | { |
@@ -68,10 +69,11 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) | |||
68 | PCI_EXP_DEVCTL_NFERE | | 69 | PCI_EXP_DEVCTL_NFERE | |
69 | PCI_EXP_DEVCTL_FERE | | 70 | PCI_EXP_DEVCTL_FERE | |
70 | PCI_EXP_DEVCTL_URRE); | 71 | PCI_EXP_DEVCTL_URRE); |
71 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, | 72 | pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16); |
72 | reg16); | 73 | |
73 | return 0; | 74 | return 0; |
74 | } | 75 | } |
76 | EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); | ||
75 | 77 | ||
76 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | 78 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) |
77 | { | 79 | { |
@@ -92,6 +94,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | |||
92 | 94 | ||
93 | return 0; | 95 | return 0; |
94 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | ||
95 | 98 | ||
96 | #if 0 | 99 | #if 0 |
97 | int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | 100 | int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) |
@@ -110,7 +113,6 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | |||
110 | } | 113 | } |
111 | #endif /* 0 */ | 114 | #endif /* 0 */ |
112 | 115 | ||
113 | |||
114 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | 116 | static int set_device_error_reporting(struct pci_dev *dev, void *data) |
115 | { | 117 | { |
116 | bool enable = *((bool *)data); | 118 | bool enable = *((bool *)data); |
@@ -164,8 +166,9 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) | |||
164 | e_info->dev[e_info->error_dev_num] = dev; | 166 | e_info->dev[e_info->error_dev_num] = dev; |
165 | e_info->error_dev_num++; | 167 | e_info->error_dev_num++; |
166 | return 1; | 168 | return 1; |
167 | } else | 169 | } |
168 | return 0; | 170 | |
171 | return 0; | ||
169 | } | 172 | } |
170 | 173 | ||
171 | 174 | ||
@@ -193,7 +196,7 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
193 | * If there is no multiple error, we stop | 196 | * If there is no multiple error, we stop |
194 | * or continue based on the id comparing. | 197 | * or continue based on the id comparing. |
195 | */ | 198 | */ |
196 | if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG)) | 199 | if (!e_info->multi_error_valid) |
197 | return result; | 200 | return result; |
198 | 201 | ||
199 | /* | 202 | /* |
@@ -233,24 +236,16 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
233 | status = 0; | 236 | status = 0; |
234 | mask = 0; | 237 | mask = 0; |
235 | if (e_info->severity == AER_CORRECTABLE) { | 238 | if (e_info->severity == AER_CORRECTABLE) { |
236 | pci_read_config_dword(dev, | 239 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); |
237 | pos + PCI_ERR_COR_STATUS, | 240 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask); |
238 | &status); | 241 | if (status & ~mask) { |
239 | pci_read_config_dword(dev, | ||
240 | pos + PCI_ERR_COR_MASK, | ||
241 | &mask); | ||
242 | if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) { | ||
243 | add_error_device(e_info, dev); | 242 | add_error_device(e_info, dev); |
244 | goto added; | 243 | goto added; |
245 | } | 244 | } |
246 | } else { | 245 | } else { |
247 | pci_read_config_dword(dev, | 246 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); |
248 | pos + PCI_ERR_UNCOR_STATUS, | 247 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); |
249 | &status); | 248 | if (status & ~mask) { |
250 | pci_read_config_dword(dev, | ||
251 | pos + PCI_ERR_UNCOR_MASK, | ||
252 | &mask); | ||
253 | if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) { | ||
254 | add_error_device(e_info, dev); | 249 | add_error_device(e_info, dev); |
255 | goto added; | 250 | goto added; |
256 | } | 251 | } |
@@ -259,7 +254,7 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
259 | return 0; | 254 | return 0; |
260 | 255 | ||
261 | added: | 256 | added: |
262 | if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG) | 257 | if (e_info->multi_error_valid) |
263 | return 0; | 258 | return 0; |
264 | else | 259 | else |
265 | return 1; | 260 | return 1; |
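With the hard-coded ERR_CORRECTABLE_ERROR_MASK / ERR_UNCORRECTABLE_ERROR_MASK constants gone, find_device_iter() now reads the device's own COR/UNCOR mask register and treats any unmasked status bit as reportable. A tiny sketch of that predicate; the helper name and the sample register values are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Reportable if any status bit survives the per-device mask. */
    static bool has_unmasked_error(uint32_t status, uint32_t mask)
    {
        return (status & ~mask) != 0;
    }

    int main(void)
    {
        uint32_t status = 0x00000041;    /* e.g. bits 0 and 6 set */
        uint32_t mask   = 0x00000001;    /* bit 0 masked by the device */

        printf("reportable: %s\n",
               has_unmasked_error(status, mask) ? "yes" : "no");
        return 0;
    }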
@@ -411,8 +406,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, | |||
411 | pci_cleanup_aer_uncorrect_error_status(dev); | 406 | pci_cleanup_aer_uncorrect_error_status(dev); |
412 | dev->error_state = pci_channel_io_normal; | 407 | dev->error_state = pci_channel_io_normal; |
413 | } | 408 | } |
414 | } | 409 | } else { |
415 | else { | ||
416 | /* | 410 | /* |
417 | * If the error is reported by an end point, we think this | 411 | * If the error is reported by an end point, we think this |
418 | * error is related to the upstream link of the end point. | 412 | * error is related to the upstream link of the end point. |
@@ -473,7 +467,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev, | |||
473 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) | 467 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) |
474 | udev = dev; | 468 | udev = dev; |
475 | else | 469 | else |
476 | udev= dev->bus->self; | 470 | udev = dev->bus->self; |
477 | 471 | ||
478 | data.is_downstream = 0; | 472 | data.is_downstream = 0; |
479 | data.aer_driver = NULL; | 473 | data.aer_driver = NULL; |
@@ -576,7 +570,7 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev, | |||
576 | * | 570 | * |
577 | * Invoked when an error is detected by the Root Port. | 571 | * Invoked when an error is detected by the Root Port. |
578 | */ | 572 | */ |
579 | static void handle_error_source(struct pcie_device * aerdev, | 573 | static void handle_error_source(struct pcie_device *aerdev, |
580 | struct pci_dev *dev, | 574 | struct pci_dev *dev, |
581 | struct aer_err_info *info) | 575 | struct aer_err_info *info) |
582 | { | 576 | { |
@@ -682,7 +676,7 @@ static void disable_root_aer(struct aer_rpc *rpc) | |||
682 | * | 676 | * |
683 | * Invoked by DPC handler to consume an error. | 677 | * Invoked by DPC handler to consume an error. |
684 | */ | 678 | */ |
685 | static struct aer_err_source* get_e_source(struct aer_rpc *rpc) | 679 | static struct aer_err_source *get_e_source(struct aer_rpc *rpc) |
686 | { | 680 | { |
687 | struct aer_err_source *e_source; | 681 | struct aer_err_source *e_source; |
688 | unsigned long flags; | 682 | unsigned long flags; |
@@ -702,32 +696,50 @@ static struct aer_err_source* get_e_source(struct aer_rpc *rpc) | |||
702 | return e_source; | 696 | return e_source; |
703 | } | 697 | } |
704 | 698 | ||
699 | /** | ||
700 | * get_device_error_info - read error status from dev and store it to info | ||
701 | * @dev: pointer to the device expected to have an error record | ||
702 | * @info: pointer to structure to store the error record | ||
703 | * | ||
704 | * Return 1 on success, 0 on error. | ||
705 | */ | ||
705 | static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | 706 | static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) |
706 | { | 707 | { |
707 | int pos; | 708 | int pos, temp; |
709 | |||
710 | info->status = 0; | ||
711 | info->tlp_header_valid = 0; | ||
708 | 712 | ||
709 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 713 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
710 | 714 | ||
711 | /* The device might not support AER */ | 715 | /* The device might not support AER */ |
712 | if (!pos) | 716 | if (!pos) |
713 | return AER_SUCCESS; | 717 | return 1; |
714 | 718 | ||
715 | if (info->severity == AER_CORRECTABLE) { | 719 | if (info->severity == AER_CORRECTABLE) { |
716 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, | 720 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, |
717 | &info->status); | 721 | &info->status); |
718 | if (!(info->status & ERR_CORRECTABLE_ERROR_MASK)) | 722 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, |
719 | return AER_UNSUCCESS; | 723 | &info->mask); |
724 | if (!(info->status & ~info->mask)) | ||
725 | return 0; | ||
720 | } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE || | 726 | } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE || |
721 | info->severity == AER_NONFATAL) { | 727 | info->severity == AER_NONFATAL) { |
722 | 728 | ||
723 | /* Link is still healthy for IO reads */ | 729 | /* Link is still healthy for IO reads */ |
724 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, | 730 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, |
725 | &info->status); | 731 | &info->status); |
726 | if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK)) | 732 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, |
727 | return AER_UNSUCCESS; | 733 | &info->mask); |
734 | if (!(info->status & ~info->mask)) | ||
735 | return 0; | ||
736 | |||
737 | /* Get First Error Pointer */ | ||
738 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp); | ||
739 | info->first_error = PCI_ERR_CAP_FEP(temp); | ||
728 | 740 | ||
729 | if (info->status & AER_LOG_TLP_MASKS) { | 741 | if (info->status & AER_LOG_TLP_MASKS) { |
730 | info->flags |= AER_TLP_HEADER_VALID_FLAG; | 742 | info->tlp_header_valid = 1; |
731 | pci_read_config_dword(dev, | 743 | pci_read_config_dword(dev, |
732 | pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); | 744 | pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0); |
733 | pci_read_config_dword(dev, | 745 | pci_read_config_dword(dev, |
@@ -739,7 +751,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | |||
739 | } | 751 | } |
740 | } | 752 | } |
741 | 753 | ||
742 | return AER_SUCCESS; | 754 | return 1; |
743 | } | 755 | } |
744 | 756 | ||
745 | static inline void aer_process_err_devices(struct pcie_device *p_device, | 757 | static inline void aer_process_err_devices(struct pcie_device *p_device, |
@@ -753,14 +765,14 @@ static inline void aer_process_err_devices(struct pcie_device *p_device, | |||
753 | e_info->id); | 765 | e_info->id); |
754 | } | 766 | } |
755 | 767 | ||
768 | /* Report all errors before handling them, so records are not lost to resets etc. */ | ||
756 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { | 769 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { |
757 | if (get_device_error_info(e_info->dev[i], e_info) == | 770 | if (get_device_error_info(e_info->dev[i], e_info)) |
758 | AER_SUCCESS) { | ||
759 | aer_print_error(e_info->dev[i], e_info); | 771 | aer_print_error(e_info->dev[i], e_info); |
760 | handle_error_source(p_device, | 772 | } |
761 | e_info->dev[i], | 773 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { |
762 | e_info); | 774 | if (get_device_error_info(e_info->dev[i], e_info)) |
763 | } | 775 | handle_error_source(p_device, e_info->dev[i], e_info); |
764 | } | 776 | } |
765 | } | 777 | } |
766 | 778 | ||
@@ -806,7 +818,9 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
806 | if (e_src->status & | 818 | if (e_src->status & |
807 | (PCI_ERR_ROOT_MULTI_COR_RCV | | 819 | (PCI_ERR_ROOT_MULTI_COR_RCV | |
808 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) | 820 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) |
809 | e_info->flags |= AER_MULTI_ERROR_VALID_FLAG; | 821 | e_info->multi_error_valid = 1; |
822 | |||
823 | aer_print_port_info(p_device->port, e_info); | ||
810 | 824 | ||
811 | find_source_device(p_device->port, e_info); | 825 | find_source_device(p_device->port, e_info); |
812 | aer_process_err_devices(p_device, e_info); | 826 | aer_process_err_devices(p_device, e_info); |
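aer_isr_one_error() now latches multiple-error reception into the multi_error_valid bitfield and prints a one-line per-port summary via aer_print_port_info() before the source device is even looked up. A rough sketch of decoding a Root Error Status word into that summary; the bit values below are local copies of the Root Error Status bit positions as I understand them and should be treated as assumptions:

    #include <stdio.h>
    #include <stdint.h>

    /* Root Error Status bits (local copies for this sketch). */
    #define ROOT_COR_RCV          0x01
    #define ROOT_MULTI_COR_RCV    0x02
    #define ROOT_UNCOR_RCV        0x04
    #define ROOT_MULTI_UNCOR_RCV  0x08

    int main(void)
    {
        uint32_t root_status = ROOT_UNCOR_RCV | ROOT_MULTI_UNCOR_RCV;
        uint16_t source_id = 0x0018;      /* bus 0, device 3, function 0 */
        int multi = !!(root_status &
                       (ROOT_MULTI_COR_RCV | ROOT_MULTI_UNCOR_RCV));

        /* Shape of the new one-line port summary. */
        printf("AER: %s%s error received: id=%04x\n",
               multi ? "Multiple " : "",
               (root_status & ROOT_UNCOR_RCV) ? "Uncorrected" : "Corrected",
               source_id);
        return 0;
    }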
@@ -863,10 +877,5 @@ int aer_init(struct pcie_device *dev) | |||
863 | if (aer_osc_setup(dev) && !forceload) | 877 | if (aer_osc_setup(dev) && !forceload) |
864 | return -ENXIO; | 878 | return -ENXIO; |
865 | 879 | ||
866 | return AER_SUCCESS; | 880 | return 0; |
867 | } | 881 | } |
868 | |||
869 | EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); | ||
870 | EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); | ||
871 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | ||
872 | |||
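The remaining aerdrv_core.c cleanups drop the private AER_SUCCESS / AER_UNSUCCESS codes in favour of the usual 0 / negative-errno convention and move each EXPORT_SYMBOL_GPL() next to the function it exports. A tiny hedged sketch of the return convention only (ordinary userspace code, not the kernel API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* 0 on success, negative errno on failure - the convention aer_init()
     * now follows instead of a private AER_SUCCESS code. */
    static int init_thing(void **out)
    {
        *out = malloc(16);
        if (!*out)
            return -ENOMEM;
        return 0;
    }

    int main(void)
    {
        void *p;
        int ret = init_thing(&p);

        printf("init_thing() returned %d\n", ret);
        free(p);
        return ret ? 1 : 0;
    }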
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 0fc29ae80df8..44acde72294f 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
@@ -27,69 +27,70 @@ | |||
27 | #define AER_AGENT_COMPLETER 2 | 27 | #define AER_AGENT_COMPLETER 2 |
28 | #define AER_AGENT_TRANSMITTER 3 | 28 | #define AER_AGENT_TRANSMITTER 3 |
29 | 29 | ||
30 | #define AER_AGENT_REQUESTER_MASK (PCI_ERR_UNC_COMP_TIME| \ | 30 | #define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \ |
31 | PCI_ERR_UNC_UNSUP) | 31 | 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP)) |
32 | 32 | #define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \ | |
33 | #define AER_AGENT_COMPLETER_MASK PCI_ERR_UNC_COMP_ABORT | 33 | 0 : PCI_ERR_UNC_COMP_ABORT) |
34 | 34 | #define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \ | |
35 | #define AER_AGENT_TRANSMITTER_MASK(t, e) (e & (PCI_ERR_COR_REP_ROLL| \ | 35 | (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0) |
36 | ((t == AER_CORRECTABLE) ? PCI_ERR_COR_REP_TIMER: 0))) | ||
37 | 36 | ||
38 | #define AER_GET_AGENT(t, e) \ | 37 | #define AER_GET_AGENT(t, e) \ |
39 | ((e & AER_AGENT_COMPLETER_MASK) ? AER_AGENT_COMPLETER : \ | 38 | ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \ |
40 | (e & AER_AGENT_REQUESTER_MASK) ? AER_AGENT_REQUESTER : \ | 39 | (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \ |
41 | (AER_AGENT_TRANSMITTER_MASK(t, e)) ? AER_AGENT_TRANSMITTER : \ | 40 | (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \ |
42 | AER_AGENT_RECEIVER) | 41 | AER_AGENT_RECEIVER) |
43 | 42 | ||
44 | #define AER_PHYSICAL_LAYER_ERROR_MASK PCI_ERR_COR_RCVR | ||
45 | #define AER_DATA_LINK_LAYER_ERROR_MASK(t, e) \ | ||
46 | (PCI_ERR_UNC_DLP| \ | ||
47 | PCI_ERR_COR_BAD_TLP| \ | ||
48 | PCI_ERR_COR_BAD_DLLP| \ | ||
49 | PCI_ERR_COR_REP_ROLL| \ | ||
50 | ((t == AER_CORRECTABLE) ? \ | ||
51 | PCI_ERR_COR_REP_TIMER: 0)) | ||
52 | |||
53 | #define AER_PHYSICAL_LAYER_ERROR 0 | 43 | #define AER_PHYSICAL_LAYER_ERROR 0 |
54 | #define AER_DATA_LINK_LAYER_ERROR 1 | 44 | #define AER_DATA_LINK_LAYER_ERROR 1 |
55 | #define AER_TRANSACTION_LAYER_ERROR 2 | 45 | #define AER_TRANSACTION_LAYER_ERROR 2 |
56 | 46 | ||
57 | #define AER_GET_LAYER_ERROR(t, e) \ | 47 | #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ |
58 | ((e & AER_PHYSICAL_LAYER_ERROR_MASK) ? \ | 48 | PCI_ERR_COR_RCVR : 0) |
59 | AER_PHYSICAL_LAYER_ERROR : \ | 49 | #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ |
60 | (e & AER_DATA_LINK_LAYER_ERROR_MASK(t, e)) ? \ | 50 | (PCI_ERR_COR_BAD_TLP| \ |
61 | AER_DATA_LINK_LAYER_ERROR : \ | 51 | PCI_ERR_COR_BAD_DLLP| \ |
62 | AER_TRANSACTION_LAYER_ERROR) | 52 | PCI_ERR_COR_REP_ROLL| \ |
53 | PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP) | ||
54 | |||
55 | #define AER_GET_LAYER_ERROR(t, e) \ | ||
56 | ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \ | ||
57 | (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \ | ||
58 | AER_TRANSACTION_LAYER_ERROR) | ||
59 | |||
60 | #define AER_PR(info, pdev, fmt, args...) \ | ||
61 | printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \ | ||
62 | KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \ | ||
63 | dev_name(&pdev->dev), ## args) | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * AER error strings | 66 | * AER error strings |
66 | */ | 67 | */ |
67 | static char* aer_error_severity_string[] = { | 68 | static char *aer_error_severity_string[] = { |
68 | "Uncorrected (Non-Fatal)", | 69 | "Uncorrected (Non-Fatal)", |
69 | "Uncorrected (Fatal)", | 70 | "Uncorrected (Fatal)", |
70 | "Corrected" | 71 | "Corrected" |
71 | }; | 72 | }; |
72 | 73 | ||
73 | static char* aer_error_layer[] = { | 74 | static char *aer_error_layer[] = { |
74 | "Physical Layer", | 75 | "Physical Layer", |
75 | "Data Link Layer", | 76 | "Data Link Layer", |
76 | "Transaction Layer" | 77 | "Transaction Layer" |
77 | }; | 78 | }; |
78 | static char* aer_correctable_error_string[] = { | 79 | static char *aer_correctable_error_string[] = { |
79 | "Receiver Error ", /* Bit Position 0 */ | 80 | "Receiver Error ", /* Bit Position 0 */ |
80 | NULL, | 81 | NULL, |
81 | NULL, | 82 | NULL, |
82 | NULL, | 83 | NULL, |
83 | NULL, | 84 | NULL, |
84 | NULL, | 85 | NULL, |
85 | "Bad TLP ", /* Bit Position 6 */ | 86 | "Bad TLP ", /* Bit Position 6 */ |
86 | "Bad DLLP ", /* Bit Position 7 */ | 87 | "Bad DLLP ", /* Bit Position 7 */ |
87 | "RELAY_NUM Rollover ", /* Bit Position 8 */ | 88 | "RELAY_NUM Rollover ", /* Bit Position 8 */ |
88 | NULL, | 89 | NULL, |
89 | NULL, | 90 | NULL, |
90 | NULL, | 91 | NULL, |
91 | "Replay Timer Timeout ", /* Bit Position 12 */ | 92 | "Replay Timer Timeout ", /* Bit Position 12 */ |
92 | "Advisory Non-Fatal ", /* Bit Position 13 */ | 93 | "Advisory Non-Fatal ", /* Bit Position 13 */ |
93 | NULL, | 94 | NULL, |
94 | NULL, | 95 | NULL, |
95 | NULL, | 96 | NULL, |
@@ -110,7 +111,7 @@ static char* aer_correctable_error_string[] = { | |||
110 | NULL, | 111 | NULL, |
111 | }; | 112 | }; |
112 | 113 | ||
113 | static char* aer_uncorrectable_error_string[] = { | 114 | static char *aer_uncorrectable_error_string[] = { |
114 | NULL, | 115 | NULL, |
115 | NULL, | 116 | NULL, |
116 | NULL, | 117 | NULL, |
@@ -123,10 +124,10 @@ static char* aer_uncorrectable_error_string[] = { | |||
123 | NULL, | 124 | NULL, |
124 | NULL, | 125 | NULL, |
125 | NULL, | 126 | NULL, |
126 | "Poisoned TLP ", /* Bit Position 12 */ | 127 | "Poisoned TLP ", /* Bit Position 12 */ |
127 | "Flow Control Protocol ", /* Bit Position 13 */ | 128 | "Flow Control Protocol ", /* Bit Position 13 */ |
128 | "Completion Timeout ", /* Bit Position 14 */ | 129 | "Completion Timeout ", /* Bit Position 14 */ |
129 | "Completer Abort ", /* Bit Position 15 */ | 130 | "Completer Abort ", /* Bit Position 15 */ |
130 | "Unexpected Completion ", /* Bit Position 16 */ | 131 | "Unexpected Completion ", /* Bit Position 16 */ |
131 | "Receiver Overflow ", /* Bit Position 17 */ | 132 | "Receiver Overflow ", /* Bit Position 17 */ |
132 | "Malformed TLP ", /* Bit Position 18 */ | 133 | "Malformed TLP ", /* Bit Position 18 */ |
@@ -145,98 +146,69 @@ static char* aer_uncorrectable_error_string[] = { | |||
145 | NULL, | 146 | NULL, |
146 | }; | 147 | }; |
147 | 148 | ||
148 | static char* aer_agent_string[] = { | 149 | static char *aer_agent_string[] = { |
149 | "Receiver ID", | 150 | "Receiver ID", |
150 | "Requester ID", | 151 | "Requester ID", |
151 | "Completer ID", | 152 | "Completer ID", |
152 | "Transmitter ID" | 153 | "Transmitter ID" |
153 | }; | 154 | }; |
154 | 155 | ||
155 | static char * aer_get_error_source_name(int severity, | 156 | static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev) |
156 | unsigned int status, | ||
157 | char errmsg_buff[]) | ||
158 | { | 157 | { |
159 | int i; | 158 | int i, status; |
160 | char * errmsg = NULL; | 159 | char *errmsg = NULL; |
160 | |||
161 | status = (info->status & ~info->mask); | ||
161 | 162 | ||
162 | for (i = 0; i < 32; i++) { | 163 | for (i = 0; i < 32; i++) { |
163 | if (!(status & (1 << i))) | 164 | if (!(status & (1 << i))) |
164 | continue; | 165 | continue; |
165 | 166 | ||
166 | if (severity == AER_CORRECTABLE) | 167 | if (info->severity == AER_CORRECTABLE) |
167 | errmsg = aer_correctable_error_string[i]; | 168 | errmsg = aer_correctable_error_string[i]; |
168 | else | 169 | else |
169 | errmsg = aer_uncorrectable_error_string[i]; | 170 | errmsg = aer_uncorrectable_error_string[i]; |
170 | 171 | ||
171 | if (!errmsg) { | 172 | if (errmsg) |
172 | sprintf(errmsg_buff, "Unknown Error Bit %2d ", i); | 173 | AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg, |
173 | errmsg = errmsg_buff; | 174 | info->first_error == i ? " (First)" : ""); |
174 | } | 175 | else |
175 | 176 | AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i, | |
176 | break; | 177 | info->first_error == i ? " (First)" : ""); |
177 | } | 178 | } |
178 | |||
179 | return errmsg; | ||
180 | } | 179 | } |
181 | 180 | ||
182 | static DEFINE_SPINLOCK(logbuf_lock); | ||
183 | static char errmsg_buff[100]; | ||
184 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | 181 | void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) |
185 | { | 182 | { |
186 | char * errmsg; | 183 | int id = ((dev->bus->number << 8) | dev->devfn); |
187 | int err_layer, agent; | 184 | |
188 | char * loglevel; | 185 | if (info->status == 0) { |
189 | 186 | AER_PR(info, dev, | |
190 | if (info->severity == AER_CORRECTABLE) | 187 | "PCIE Bus Error: severity=%s, type=Unaccessible, " |
191 | loglevel = KERN_WARNING; | 188 | "id=%04x(Unregistered Agent ID)\n", |
192 | else | 189 | aer_error_severity_string[info->severity], id); |
193 | loglevel = KERN_ERR; | ||
194 | |||
195 | printk("%s+------ PCI-Express Device Error ------+\n", loglevel); | ||
196 | printk("%sError Severity\t\t: %s\n", loglevel, | ||
197 | aer_error_severity_string[info->severity]); | ||
198 | |||
199 | if ( info->status == 0) { | ||
200 | printk("%sPCIE Bus Error type\t: (Unaccessible)\n", loglevel); | ||
201 | printk("%sUnaccessible Received\t: %s\n", loglevel, | ||
202 | info->flags & AER_MULTI_ERROR_VALID_FLAG ? | ||
203 | "Multiple" : "First"); | ||
204 | printk("%sUnregistered Agent ID\t: %04x\n", loglevel, | ||
205 | (dev->bus->number << 8) | dev->devfn); | ||
206 | } else { | 190 | } else { |
207 | err_layer = AER_GET_LAYER_ERROR(info->severity, info->status); | 191 | int layer, agent; |
208 | printk("%sPCIE Bus Error type\t: %s\n", loglevel, | ||
209 | aer_error_layer[err_layer]); | ||
210 | |||
211 | spin_lock(&logbuf_lock); | ||
212 | errmsg = aer_get_error_source_name(info->severity, | ||
213 | info->status, | ||
214 | errmsg_buff); | ||
215 | printk("%s%s\t: %s\n", loglevel, errmsg, | ||
216 | info->flags & AER_MULTI_ERROR_VALID_FLAG ? | ||
217 | "Multiple" : "First"); | ||
218 | spin_unlock(&logbuf_lock); | ||
219 | 192 | ||
193 | layer = AER_GET_LAYER_ERROR(info->severity, info->status); | ||
220 | agent = AER_GET_AGENT(info->severity, info->status); | 194 | agent = AER_GET_AGENT(info->severity, info->status); |
221 | printk("%s%s\t\t: %04x\n", loglevel, | 195 | |
222 | aer_agent_string[agent], | 196 | AER_PR(info, dev, |
223 | (dev->bus->number << 8) | dev->devfn); | 197 | "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n", |
224 | 198 | aer_error_severity_string[info->severity], | |
225 | printk("%sVendorID=%04xh, DeviceID=%04xh," | 199 | aer_error_layer[layer], id, aer_agent_string[agent]); |
226 | " Bus=%02xh, Device=%02xh, Function=%02xh\n", | 200 | |
227 | loglevel, | 201 | AER_PR(info, dev, |
228 | dev->vendor, | 202 | " device [%04x:%04x] error status/mask=%08x/%08x\n", |
229 | dev->device, | 203 | dev->vendor, dev->device, info->status, info->mask); |
230 | dev->bus->number, | 204 | |
231 | PCI_SLOT(dev->devfn), | 205 | __aer_print_error(info, dev); |
232 | PCI_FUNC(dev->devfn)); | 206 | |
233 | 207 | if (info->tlp_header_valid) { | |
234 | if (info->flags & AER_TLP_HEADER_VALID_FLAG) { | ||
235 | unsigned char *tlp = (unsigned char *) &info->tlp; | 208 | unsigned char *tlp = (unsigned char *) &info->tlp; |
236 | printk("%sTLP Header:\n", loglevel); | 209 | AER_PR(info, dev, " TLP Header:" |
237 | printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" | 210 | " %02x%02x%02x%02x %02x%02x%02x%02x" |
238 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | 211 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", |
239 | loglevel, | ||
240 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | 212 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, |
241 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | 213 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), |
242 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | 214 | *(tlp + 11), *(tlp + 10), *(tlp + 9), |
@@ -244,5 +216,15 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
244 | *(tlp + 13), *(tlp + 12)); | 216 | *(tlp + 13), *(tlp + 12)); |
245 | } | 217 | } |
246 | } | 218 | } |
219 | |||
220 | if (info->id && info->error_dev_num > 1 && info->id == id) | ||
221 | AER_PR(info, dev, | ||
222 | " Error of this Agent(%04x) is reported first\n", id); | ||
247 | } | 223 | } |
248 | 224 | ||
225 | void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) | ||
226 | { | ||
227 | dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n", | ||
228 | info->multi_error_valid ? "Multiple " : "", | ||
229 | aer_error_severity_string[info->severity], info->id); | ||
230 | } | ||
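The rewritten error printing classifies each status by layer and agent with macros that now take severity into account (a correctable Receiver Error is a physical-layer event, an uncorrectable Data Link Protocol error is a data-link one, and so on), and __aer_print_error() walks the unmasked bits, naming each and tagging the one indicated by the First Error Pointer. A userspace approximation of that classification and bit walk; the PCI_ERR_*-style values are local copies chosen to match the bit positions in the string tables above and should be double-checked against the real headers:

    #include <stdio.h>
    #include <stdint.h>

    #define AER_CORRECTABLE 2

    /* Local copies of a few status bits, by bit position (see tables above). */
    #define COR_RCVR        (1u << 0)    /* Receiver Error */
    #define COR_BAD_TLP     (1u << 6)
    #define COR_BAD_DLLP    (1u << 7)
    #define COR_REP_ROLL    (1u << 8)
    #define COR_REP_TIMER   (1u << 12)
    #define UNC_DLP         (1u << 4)    /* Data Link Protocol Error */
    #define UNC_COMP_TIME   (1u << 14)   /* Completion Timeout */
    #define UNC_COMP_ABORT  (1u << 15)   /* Completer Abort */
    #define UNC_UNSUP       (1u << 20)   /* Unsupported Request */

    static const char *layer_name(int severity, uint32_t e)
    {
        uint32_t phys = (severity == AER_CORRECTABLE) ? COR_RCVR : 0;
        uint32_t dll  = (severity == AER_CORRECTABLE) ?
            (COR_BAD_TLP | COR_BAD_DLLP | COR_REP_ROLL | COR_REP_TIMER) :
            UNC_DLP;

        return (e & phys) ? "Physical Layer" :
               (e & dll)  ? "Data Link Layer" : "Transaction Layer";
    }

    static const char *agent_name(int severity, uint32_t e)
    {
        uint32_t completer = (severity == AER_CORRECTABLE) ? 0 : UNC_COMP_ABORT;
        uint32_t requester = (severity == AER_CORRECTABLE) ?
                                 0 : (UNC_COMP_TIME | UNC_UNSUP);
        uint32_t transmit  = (severity == AER_CORRECTABLE) ?
                                 (COR_REP_ROLL | COR_REP_TIMER) : 0;

        return (e & completer) ? "Completer ID" :
               (e & requester) ? "Requester ID" :
               (e & transmit)  ? "Transmitter ID" : "Receiver ID";
    }

    int main(void)
    {
        uint32_t status = COR_BAD_TLP | COR_REP_TIMER;
        int i, first = 6;

        printf("type=%s, %s\n",
               layer_name(AER_CORRECTABLE, status),
               agent_name(AER_CORRECTABLE, status));

        /* walk the unmasked bits the way __aer_print_error() does */
        for (i = 0; i < 32; i++)
            if (status & (1u << i))
                printf(" [%2d]%s\n", i, i == first ? " (First)" : "");
        return 0;
    }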
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 3d27c97e0486..f289ca9bf18d 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -26,6 +26,13 @@ | |||
26 | #endif | 26 | #endif |
27 | #define MODULE_PARAM_PREFIX "pcie_aspm." | 27 | #define MODULE_PARAM_PREFIX "pcie_aspm." |
28 | 28 | ||
29 | /* Note: those are not register definitions */ | ||
30 | #define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ | ||
31 | #define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ | ||
32 | #define ASPM_STATE_L1 (4) /* L1 state */ | ||
33 | #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) | ||
34 | #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) | ||
35 | |||
29 | struct aspm_latency { | 36 | struct aspm_latency { |
30 | u32 l0s; /* L0s latency (nsec) */ | 37 | u32 l0s; /* L0s latency (nsec) */ |
31 | u32 l1; /* L1 latency (nsec) */ | 38 | u32 l1; /* L1 latency (nsec) */ |
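The new ASPM_STATE_* flags split L0s into separate upstream-direction and downstream-direction bits so each side of a link can be tracked and disabled independently, with ASPM_STATE_ALL as the union the POWERSAVE policy requests. A minimal sketch of the flag arithmetic; the values simply mirror the definitions in the hunk above:

    #include <stdio.h>

    #define ASPM_STATE_L0S_UP (1)                  /* upstream-direction L0s */
    #define ASPM_STATE_L0S_DW (2)                  /* downstream-direction L0s */
    #define ASPM_STATE_L1     (4)
    #define ASPM_STATE_L0S    (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
    #define ASPM_STATE_ALL    (ASPM_STATE_L0S | ASPM_STATE_L1)

    int main(void)
    {
        unsigned int capable = ASPM_STATE_ALL;

        /* e.g. drop only the upstream-direction L0s on latency grounds */
        capable &= ~ASPM_STATE_L0S_UP;

        printf("capable=0x%x (L0s up:%d dw:%d L1:%d)\n", capable,
               !!(capable & ASPM_STATE_L0S_UP),
               !!(capable & ASPM_STATE_L0S_DW),
               !!(capable & ASPM_STATE_L1));
        return 0;
    }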
@@ -40,17 +47,20 @@ struct pcie_link_state { | |||
40 | struct list_head link; /* node in parent's children list */ | 47 | struct list_head link; /* node in parent's children list */ |
41 | 48 | ||
42 | /* ASPM state */ | 49 | /* ASPM state */ |
43 | u32 aspm_support:2; /* Supported ASPM state */ | 50 | u32 aspm_support:3; /* Supported ASPM state */ |
44 | u32 aspm_enabled:2; /* Enabled ASPM state */ | 51 | u32 aspm_enabled:3; /* Enabled ASPM state */ |
45 | u32 aspm_default:2; /* Default ASPM state by BIOS */ | 52 | u32 aspm_capable:3; /* Capable ASPM state with latency */ |
53 | u32 aspm_default:3; /* Default ASPM state by BIOS */ | ||
54 | u32 aspm_disable:3; /* Disabled ASPM state */ | ||
46 | 55 | ||
47 | /* Clock PM state */ | 56 | /* Clock PM state */ |
48 | u32 clkpm_capable:1; /* Clock PM capable? */ | 57 | u32 clkpm_capable:1; /* Clock PM capable? */ |
49 | u32 clkpm_enabled:1; /* Current Clock PM state */ | 58 | u32 clkpm_enabled:1; /* Current Clock PM state */ |
50 | u32 clkpm_default:1; /* Default Clock PM state by BIOS */ | 59 | u32 clkpm_default:1; /* Default Clock PM state by BIOS */ |
51 | 60 | ||
52 | /* Latencies */ | 61 | /* Exit latencies */ |
53 | struct aspm_latency latency; /* Exit latency */ | 62 | struct aspm_latency latency_up; /* Upstream direction exit latency */ |
63 | struct aspm_latency latency_dw; /* Downstream direction exit latency */ | ||
54 | /* | 64 | /* |
55 | * Endpoint acceptable latencies. A pcie downstream port only | 65 | * Endpoint acceptable latencies. A pcie downstream port only |
56 | * has one slot under it, so at most there are 8 functions. | 66 | * has one slot under it, so at most there are 8 functions. |
@@ -82,7 +92,7 @@ static int policy_to_aspm_state(struct pcie_link_state *link) | |||
82 | return 0; | 92 | return 0; |
83 | case POLICY_POWERSAVE: | 93 | case POLICY_POWERSAVE: |
84 | /* Enable ASPM L0s/L1 */ | 94 | /* Enable ASPM L0s/L1 */ |
85 | return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; | 95 | return ASPM_STATE_ALL; |
86 | case POLICY_DEFAULT: | 96 | case POLICY_DEFAULT: |
87 | return link->aspm_default; | 97 | return link->aspm_default; |
88 | } | 98 | } |
@@ -164,18 +174,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) | |||
164 | link->clkpm_capable = (blacklist) ? 0 : capable; | 174 | link->clkpm_capable = (blacklist) ? 0 : capable; |
165 | } | 175 | } |
166 | 176 | ||
167 | static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link) | ||
168 | { | ||
169 | struct pci_dev *child; | ||
170 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
171 | |||
172 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
173 | if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM) | ||
174 | return true; | ||
175 | } | ||
176 | return false; | ||
177 | } | ||
178 | |||
179 | /* | 177 | /* |
180 | * pcie_aspm_configure_common_clock: check if the 2 ends of a link | 178 | * pcie_aspm_configure_common_clock: check if the 2 ends of a link |
181 | * could use common clock. If they are, configure them to use the | 179 | * could use common clock. If they are, configure them to use the |
@@ -288,71 +286,133 @@ static u32 calc_l1_acceptable(u32 encoding) | |||
288 | return (1000 << encoding); | 286 | return (1000 << encoding); |
289 | } | 287 | } |
290 | 288 | ||
291 | static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, | 289 | struct aspm_register_info { |
292 | u32 *l0s, u32 *l1, u32 *enabled) | 290 | u32 support:2; |
291 | u32 enabled:2; | ||
292 | u32 latency_encoding_l0s; | ||
293 | u32 latency_encoding_l1; | ||
294 | }; | ||
295 | |||
296 | static void pcie_get_aspm_reg(struct pci_dev *pdev, | ||
297 | struct aspm_register_info *info) | ||
293 | { | 298 | { |
294 | int pos; | 299 | int pos; |
295 | u16 reg16; | 300 | u16 reg16; |
296 | u32 reg32, encoding; | 301 | u32 reg32; |
297 | 302 | ||
298 | *l0s = *l1 = *enabled = 0; | ||
299 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 303 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
300 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); | 304 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); |
301 | *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; | 305 | info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; |
302 | if (*state != PCIE_LINK_STATE_L0S && | 306 | /* 00b and 10b are defined as "Reserved". */ |
303 | *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S)) | 307 | if (info->support == PCIE_LINK_STATE_L1) |
304 | *state = 0; | 308 | info->support = 0; |
305 | if (*state == 0) | 309 | info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; |
310 | info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; | ||
311 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | ||
312 | info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; | ||
313 | } | ||
314 | |||
315 | static void pcie_aspm_check_latency(struct pci_dev *endpoint) | ||
316 | { | ||
317 | u32 latency, l1_switch_latency = 0; | ||
318 | struct aspm_latency *acceptable; | ||
319 | struct pcie_link_state *link; | ||
320 | |||
321 | /* Device not in D0 doesn't need latency check */ | ||
322 | if ((endpoint->current_state != PCI_D0) && | ||
323 | (endpoint->current_state != PCI_UNKNOWN)) | ||
306 | return; | 324 | return; |
307 | 325 | ||
308 | encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; | 326 | link = endpoint->bus->self->link_state; |
309 | *l0s = calc_l0s_latency(encoding); | 327 | acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)]; |
310 | if (*state & PCIE_LINK_STATE_L1) { | 328 | |
311 | encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; | 329 | while (link) { |
312 | *l1 = calc_l1_latency(encoding); | 330 | /* Check upstream direction L0s latency */ |
331 | if ((link->aspm_capable & ASPM_STATE_L0S_UP) && | ||
332 | (link->latency_up.l0s > acceptable->l0s)) | ||
333 | link->aspm_capable &= ~ASPM_STATE_L0S_UP; | ||
334 | |||
335 | /* Check downstream direction L0s latency */ | ||
336 | if ((link->aspm_capable & ASPM_STATE_L0S_DW) && | ||
337 | (link->latency_dw.l0s > acceptable->l0s)) | ||
338 | link->aspm_capable &= ~ASPM_STATE_L0S_DW; | ||
339 | /* | ||
340 | * Check L1 latency. | ||
341 | * Every switch on the path to the root complex needs 1 | ||
342 | * more microsecond for L1. Spec doesn't mention L0s. | ||
343 | */ | ||
344 | latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1); | ||
345 | if ((link->aspm_capable & ASPM_STATE_L1) && | ||
346 | (latency + l1_switch_latency > acceptable->l1)) | ||
347 | link->aspm_capable &= ~ASPM_STATE_L1; | ||
348 | l1_switch_latency += 1000; | ||
349 | |||
350 | link = link->parent; | ||
313 | } | 351 | } |
314 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | ||
315 | *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); | ||
316 | } | 352 | } |
317 | 353 | ||
318 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | 354 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) |
319 | { | 355 | { |
320 | u32 support, l0s, l1, enabled; | ||
321 | struct pci_dev *child, *parent = link->pdev; | 356 | struct pci_dev *child, *parent = link->pdev; |
322 | struct pci_bus *linkbus = parent->subordinate; | 357 | struct pci_bus *linkbus = parent->subordinate; |
358 | struct aspm_register_info upreg, dwreg; | ||
323 | 359 | ||
324 | if (blacklist) { | 360 | if (blacklist) { |
325 | /* Set support state to 0, so we will disable ASPM later */ | 361 | /* Set enabled/disable so that we will disable ASPM later */ |
326 | link->aspm_support = 0; | 362 | link->aspm_enabled = ASPM_STATE_ALL; |
327 | link->aspm_default = 0; | 363 | link->aspm_disable = ASPM_STATE_ALL; |
328 | link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; | ||
329 | return; | 364 | return; |
330 | } | 365 | } |
331 | 366 | ||
332 | /* Configure common clock before checking latencies */ | 367 | /* Configure common clock before checking latencies */ |
333 | pcie_aspm_configure_common_clock(link); | 368 | pcie_aspm_configure_common_clock(link); |
334 | 369 | ||
335 | /* upstream component states */ | 370 | /* Get upstream/downstream components' register state */ |
336 | pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled); | 371 | pcie_get_aspm_reg(parent, &upreg); |
337 | link->aspm_support = support; | ||
338 | link->latency.l0s = l0s; | ||
339 | link->latency.l1 = l1; | ||
340 | link->aspm_enabled = enabled; | ||
341 | |||
342 | /* downstream component states, all functions have the same setting */ | ||
343 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); | 372 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); |
344 | pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled); | 373 | pcie_get_aspm_reg(child, &dwreg); |
345 | link->aspm_support &= support; | ||
346 | link->latency.l0s = max_t(u32, link->latency.l0s, l0s); | ||
347 | link->latency.l1 = max_t(u32, link->latency.l1, l1); | ||
348 | 374 | ||
349 | if (!link->aspm_support) | 375 | /* |
350 | return; | 376 | * Setup L0s state |
351 | 377 | * | |
352 | link->aspm_enabled &= link->aspm_support; | 378 | * Note that we must not enable L0s in either direction on a |
379 | * given link unless components on both sides of the link each | ||
380 | * support L0s. | ||
381 | */ | ||
382 | if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S) | ||
383 | link->aspm_support |= ASPM_STATE_L0S; | ||
384 | if (dwreg.enabled & PCIE_LINK_STATE_L0S) | ||
385 | link->aspm_enabled |= ASPM_STATE_L0S_UP; | ||
386 | if (upreg.enabled & PCIE_LINK_STATE_L0S) | ||
387 | link->aspm_enabled |= ASPM_STATE_L0S_DW; | ||
388 | link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s); | ||
389 | link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s); | ||
390 | |||
391 | /* Setup L1 state */ | ||
392 | if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1) | ||
393 | link->aspm_support |= ASPM_STATE_L1; | ||
394 | if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1) | ||
395 | link->aspm_enabled |= ASPM_STATE_L1; | ||
396 | link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1); | ||
397 | link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1); | ||
398 | |||
399 | /* Save default state */ | ||
353 | link->aspm_default = link->aspm_enabled; | 400 | link->aspm_default = link->aspm_enabled; |
354 | 401 | ||
355 | /* ENDPOINT states*/ | 402 | /* Setup initial capable state. Will be updated later */ |
403 | link->aspm_capable = link->aspm_support; | ||
404 | /* | ||
405 | * If the downstream component has a PCI bridge function, don't | ||
406 | * do ASPM for now. | ||
407 | */ | ||
408 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
409 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | ||
410 | link->aspm_disable = ASPM_STATE_ALL; | ||
411 | break; | ||
412 | } | ||
413 | } | ||
414 | |||
415 | /* Get and check endpoint acceptable latencies */ | ||
356 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 416 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
357 | int pos; | 417 | int pos; |
358 | u32 reg32, encoding; | 418 | u32 reg32, encoding; |
@@ -365,109 +425,46 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | |||
365 | 425 | ||
366 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); | 426 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); |
367 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); | 427 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); |
428 | /* Calculate endpoint L0s acceptable latency */ | ||
368 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; | 429 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; |
369 | acceptable->l0s = calc_l0s_acceptable(encoding); | 430 | acceptable->l0s = calc_l0s_acceptable(encoding); |
370 | if (link->aspm_support & PCIE_LINK_STATE_L1) { | 431 | /* Calculate endpoint L1 acceptable latency */ |
371 | encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; | 432 | encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; |
372 | acceptable->l1 = calc_l1_acceptable(encoding); | 433 | acceptable->l1 = calc_l1_acceptable(encoding); |
373 | } | ||
374 | } | ||
375 | } | ||
376 | |||
377 | /** | ||
378 | * __pcie_aspm_check_state_one - check latency for endpoint device. | ||
379 | * @endpoint: pointer to the struct pci_dev of endpoint device | ||
380 | * | ||
381 | * TBD: The latency from the endpoint to root complex vary per switch's | ||
382 | * upstream link state above the device. Here we just do a simple check | ||
383 | * which assumes all links above the device can be in L1 state, that | ||
384 | * is we just consider the worst case. If switch's upstream link can't | ||
385 | * be put into L0S/L1, then our check is too strictly. | ||
386 | */ | ||
387 | static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state) | ||
388 | { | ||
389 | u32 l1_switch_latency = 0; | ||
390 | struct aspm_latency *acceptable; | ||
391 | struct pcie_link_state *link; | ||
392 | |||
393 | link = endpoint->bus->self->link_state; | ||
394 | state &= link->aspm_support; | ||
395 | acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)]; | ||
396 | 434 | ||
397 | while (link && state) { | 435 | pcie_aspm_check_latency(child); |
398 | if ((state & PCIE_LINK_STATE_L0S) && | ||
399 | (link->latency.l0s > acceptable->l0s)) | ||
400 | state &= ~PCIE_LINK_STATE_L0S; | ||
401 | if ((state & PCIE_LINK_STATE_L1) && | ||
402 | (link->latency.l1 + l1_switch_latency > acceptable->l1)) | ||
403 | state &= ~PCIE_LINK_STATE_L1; | ||
404 | link = link->parent; | ||
405 | /* | ||
406 | * Every switch on the path to root complex need 1 | ||
407 | * more microsecond for L1. Spec doesn't mention L0s. | ||
408 | */ | ||
409 | l1_switch_latency += 1000; | ||
410 | } | ||
411 | return state; | ||
412 | } | ||
413 | |||
414 | static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state) | ||
415 | { | ||
416 | pci_power_t power_state; | ||
417 | struct pci_dev *child; | ||
418 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
419 | |||
420 | /* If no child, ignore the link */ | ||
421 | if (list_empty(&linkbus->devices)) | ||
422 | return state; | ||
423 | |||
424 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
425 | /* | ||
426 | * If downstream component of a link is pci bridge, we | ||
427 | * disable ASPM for now for the link | ||
428 | */ | ||
429 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | ||
430 | return 0; | ||
431 | |||
432 | if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT && | ||
433 | child->pcie_type != PCI_EXP_TYPE_LEG_END)) | ||
434 | continue; | ||
435 | /* Device not in D0 doesn't need check latency */ | ||
436 | power_state = child->current_state; | ||
437 | if (power_state == PCI_D1 || power_state == PCI_D2 || | ||
438 | power_state == PCI_D3hot || power_state == PCI_D3cold) | ||
439 | continue; | ||
440 | state = __pcie_aspm_check_state_one(child, state); | ||
441 | } | 436 | } |
442 | return state; | ||
443 | } | 437 | } |
444 | 438 | ||
445 | static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state) | 439 | static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) |
446 | { | 440 | { |
447 | u16 reg16; | 441 | u16 reg16; |
448 | int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 442 | int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
449 | 443 | ||
450 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | 444 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
451 | reg16 &= ~0x3; | 445 | reg16 &= ~0x3; |
452 | reg16 |= state; | 446 | reg16 |= val; |
453 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 447 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
454 | } | 448 | } |
455 | 449 | ||
456 | static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state) | 450 | static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) |
457 | { | 451 | { |
452 | u32 upstream = 0, dwstream = 0; | ||
458 | struct pci_dev *child, *parent = link->pdev; | 453 | struct pci_dev *child, *parent = link->pdev; |
459 | struct pci_bus *linkbus = parent->subordinate; | 454 | struct pci_bus *linkbus = parent->subordinate; |
460 | 455 | ||
461 | /* If no child, disable the link */ | 456 | /* Nothing to do if the link is already in the requested state */ |
462 | if (list_empty(&linkbus->devices)) | 457 | state &= (link->aspm_capable & ~link->aspm_disable); |
463 | state = 0; | 458 | if (link->aspm_enabled == state) |
464 | /* | 459 | return; |
465 | * If the downstream component has pci bridge function, don't | 460 | /* Convert ASPM state to upstream/downstream ASPM register state */ |
466 | * do ASPM now. | 461 | if (state & ASPM_STATE_L0S_UP) |
467 | */ | 462 | dwstream |= PCIE_LINK_STATE_L0S; |
468 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 463 | if (state & ASPM_STATE_L0S_DW) |
469 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | 464 | upstream |= PCIE_LINK_STATE_L0S; |
470 | return; | 465 | if (state & ASPM_STATE_L1) { |
466 | upstream |= PCIE_LINK_STATE_L1; | ||
467 | dwstream |= PCIE_LINK_STATE_L1; | ||
471 | } | 468 | } |
472 | /* | 469 | /* |
473 | * Spec 2.0 suggests all functions should be configured the | 470 | * Spec 2.0 suggests all functions should be configured the |
@@ -475,67 +472,24 @@ static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state) | |||
475 | * upstream component first and then downstream, and vice | 472 | * upstream component first and then downstream, and vice |
476 | * versa for disabling ASPM L1. Spec doesn't mention L0S. | 473 | * versa for disabling ASPM L1. Spec doesn't mention L0S. |
477 | */ | 474 | */ |
478 | if (state & PCIE_LINK_STATE_L1) | 475 | if (state & ASPM_STATE_L1) |
479 | __pcie_aspm_config_one_dev(parent, state); | 476 | pcie_config_aspm_dev(parent, upstream); |
480 | |||
481 | list_for_each_entry(child, &linkbus->devices, bus_list) | 477 | list_for_each_entry(child, &linkbus->devices, bus_list) |
482 | __pcie_aspm_config_one_dev(child, state); | 478 | pcie_config_aspm_dev(child, dwstream); |
483 | 479 | if (!(state & ASPM_STATE_L1)) | |
484 | if (!(state & PCIE_LINK_STATE_L1)) | 480 | pcie_config_aspm_dev(parent, upstream); |
485 | __pcie_aspm_config_one_dev(parent, state); | ||
486 | 481 | ||
487 | link->aspm_enabled = state; | 482 | link->aspm_enabled = state; |
488 | } | 483 | } |
489 | 484 | ||
490 | /* Check the whole hierarchy, and configure each link in the hierarchy */ | 485 | static void pcie_config_aspm_path(struct pcie_link_state *link) |
491 | static void __pcie_aspm_configure_link_state(struct pcie_link_state *link, | ||
492 | u32 state) | ||
493 | { | 486 | { |
494 | struct pcie_link_state *leaf, *root = link->root; | 487 | while (link) { |
495 | 488 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); | |
496 | state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); | 489 | link = link->parent; |
497 | |||
498 | /* Check all links who have specific root port link */ | ||
499 | list_for_each_entry(leaf, &link_list, sibling) { | ||
500 | if (!list_empty(&leaf->children) || (leaf->root != root)) | ||
501 | continue; | ||
502 | state = pcie_aspm_check_state(leaf, state); | ||
503 | } | ||
504 | /* Check root port link too in case it hasn't children */ | ||
505 | state = pcie_aspm_check_state(root, state); | ||
506 | if (link->aspm_enabled == state) | ||
507 | return; | ||
508 | /* | ||
509 | * We must change the hierarchy. See comments in | ||
510 | * __pcie_aspm_config_link for the order | ||
511 | **/ | ||
512 | if (state & PCIE_LINK_STATE_L1) { | ||
513 | list_for_each_entry(leaf, &link_list, sibling) { | ||
514 | if (leaf->root == root) | ||
515 | __pcie_aspm_config_link(leaf, state); | ||
516 | } | ||
517 | } else { | ||
518 | list_for_each_entry_reverse(leaf, &link_list, sibling) { | ||
519 | if (leaf->root == root) | ||
520 | __pcie_aspm_config_link(leaf, state); | ||
521 | } | ||
522 | } | 490 | } |
523 | } | 491 | } |
524 | 492 | ||
525 | /* | ||
526 | * pcie_aspm_configure_link_state: enable/disable PCI express link state | ||
527 | * @pdev: the root port or switch downstream port | ||
528 | */ | ||
529 | static void pcie_aspm_configure_link_state(struct pcie_link_state *link, | ||
530 | u32 state) | ||
531 | { | ||
532 | down_read(&pci_bus_sem); | ||
533 | mutex_lock(&aspm_lock); | ||
534 | __pcie_aspm_configure_link_state(link, state); | ||
535 | mutex_unlock(&aspm_lock); | ||
536 | up_read(&pci_bus_sem); | ||
537 | } | ||
538 | |||
539 | static void free_link_state(struct pcie_link_state *link) | 493 | static void free_link_state(struct pcie_link_state *link) |
540 | { | 494 | { |
541 | link->pdev->link_state = NULL; | 495 | link->pdev->link_state = NULL; |
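pcie_config_aspm_link() maps the direction-split state onto per-device LNKCTL values (upstream-direction L0s is programmed into the downstream component and vice versa) and keeps the PCIe 2.0 ordering suggestion: when enabling L1, write the upstream component first, otherwise write it last. A sketch of just that mapping and ordering; the register encodings and the write stub are placeholders, not the kernel API:

    #include <stdio.h>

    #define ASPM_L0S_UP 1
    #define ASPM_L0S_DW 2
    #define ASPM_L1     4

    #define LNK_L0S 1   /* LNKCTL ASPM control encodings (illustrative) */
    #define LNK_L1  2

    static void write_lnkctl(const char *dev, unsigned int val)
    {
        printf("%s: LNKCTL ASPM <- 0x%x\n", dev, val);
    }

    static void config_link(unsigned int state)
    {
        unsigned int upstream = 0, dwstream = 0;

        if (state & ASPM_L0S_UP)
            dwstream |= LNK_L0S;    /* upstream-direction L0s lives in the downstream device */
        if (state & ASPM_L0S_DW)
            upstream |= LNK_L0S;
        if (state & ASPM_L1) {
            upstream |= LNK_L1;
            dwstream |= LNK_L1;
        }

        if (state & ASPM_L1)        /* enabling L1: upstream component first */
            write_lnkctl("parent", upstream);
        write_lnkctl("child", dwstream);
        if (!(state & ASPM_L1))     /* otherwise parent last */
            write_lnkctl("parent", upstream);
    }

    int main(void)
    {
        config_link(ASPM_L0S_UP | ASPM_L1);
        return 0;
    }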
@@ -570,10 +524,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
570 | return 0; | 524 | return 0; |
571 | } | 525 | } |
572 | 526 | ||
573 | static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | 527 | static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) |
574 | { | 528 | { |
575 | struct pcie_link_state *link; | 529 | struct pcie_link_state *link; |
576 | int blacklist = !!pcie_aspm_sanity_check(pdev); | ||
577 | 530 | ||
578 | link = kzalloc(sizeof(*link), GFP_KERNEL); | 531 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
579 | if (!link) | 532 | if (!link) |
@@ -599,15 +552,7 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | |||
599 | link->root = link->parent->root; | 552 | link->root = link->parent->root; |
600 | 553 | ||
601 | list_add(&link->sibling, &link_list); | 554 | list_add(&link->sibling, &link_list); |
602 | |||
603 | pdev->link_state = link; | 555 | pdev->link_state = link; |
604 | |||
605 | /* Check ASPM capability */ | ||
606 | pcie_aspm_cap_init(link, blacklist); | ||
607 | |||
608 | /* Check Clock PM capability */ | ||
609 | pcie_clkpm_cap_init(link, blacklist); | ||
610 | |||
611 | return link; | 556 | return link; |
612 | } | 557 | } |
613 | 558 | ||
@@ -618,8 +563,8 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | |||
618 | */ | 563 | */ |
619 | void pcie_aspm_init_link_state(struct pci_dev *pdev) | 564 | void pcie_aspm_init_link_state(struct pci_dev *pdev) |
620 | { | 565 | { |
621 | u32 state; | ||
622 | struct pcie_link_state *link; | 566 | struct pcie_link_state *link; |
567 | int blacklist = !!pcie_aspm_sanity_check(pdev); | ||
623 | 568 | ||
624 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) | 569 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) |
625 | return; | 570 | return; |
@@ -637,47 +582,64 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
637 | goto out; | 582 | goto out; |
638 | 583 | ||
639 | mutex_lock(&aspm_lock); | 584 | mutex_lock(&aspm_lock); |
640 | link = pcie_aspm_setup_link_state(pdev); | 585 | link = alloc_pcie_link_state(pdev); |
641 | if (!link) | 586 | if (!link) |
642 | goto unlock; | 587 | goto unlock; |
643 | /* | 588 | /* |
644 | * Setup initial ASPM state | 589 | * Setup initial ASPM state. Note that we need to configure |
645 | * | 590 | * upstream links also because capable state of them can be |
646 | * If link has switch, delay the link config. The leaf link | 591 | * updated through pcie_aspm_cap_init(). |
647 | * initialization will config the whole hierarchy. But we must | ||
648 | * make sure BIOS doesn't set unsupported link state. | ||
649 | */ | 592 | */ |
650 | if (pcie_aspm_downstream_has_switch(link)) { | 593 | pcie_aspm_cap_init(link, blacklist); |
651 | state = pcie_aspm_check_state(link, link->aspm_default); | 594 | pcie_config_aspm_path(link); |
652 | __pcie_aspm_config_link(link, state); | ||
653 | } else { | ||
654 | state = policy_to_aspm_state(link); | ||
655 | __pcie_aspm_configure_link_state(link, state); | ||
656 | } | ||
657 | 595 | ||
658 | /* Setup initial Clock PM state */ | 596 | /* Setup initial Clock PM state */ |
659 | state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0; | 597 | pcie_clkpm_cap_init(link, blacklist); |
660 | pcie_set_clkpm(link, state); | 598 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
661 | unlock: | 599 | unlock: |
662 | mutex_unlock(&aspm_lock); | 600 | mutex_unlock(&aspm_lock); |
663 | out: | 601 | out: |
664 | up_read(&pci_bus_sem); | 602 | up_read(&pci_bus_sem); |
665 | } | 603 | } |
666 | 604 | ||
605 | /* Recheck latencies and update aspm_capable for links under the root */ | ||
606 | static void pcie_update_aspm_capable(struct pcie_link_state *root) | ||
607 | { | ||
608 | struct pcie_link_state *link; | ||
609 | BUG_ON(root->parent); | ||
610 | list_for_each_entry(link, &link_list, sibling) { | ||
611 | if (link->root != root) | ||
612 | continue; | ||
613 | link->aspm_capable = link->aspm_support; | ||
614 | } | ||
615 | list_for_each_entry(link, &link_list, sibling) { | ||
616 | struct pci_dev *child; | ||
617 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
618 | if (link->root != root) | ||
619 | continue; | ||
620 | list_for_each_entry(child, &linkbus->devices, bus_list) { | ||
621 | if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) && | ||
622 | (child->pcie_type != PCI_EXP_TYPE_LEG_END)) | ||
623 | continue; | ||
624 | pcie_aspm_check_latency(child); | ||
625 | } | ||
626 | } | ||
627 | } | ||
628 | |||
667 | /* @pdev: the endpoint device */ | 629 | /* @pdev: the endpoint device */ |
668 | void pcie_aspm_exit_link_state(struct pci_dev *pdev) | 630 | void pcie_aspm_exit_link_state(struct pci_dev *pdev) |
669 | { | 631 | { |
670 | struct pci_dev *parent = pdev->bus->self; | 632 | struct pci_dev *parent = pdev->bus->self; |
671 | struct pcie_link_state *link_state = parent->link_state; | 633 | struct pcie_link_state *link, *root, *parent_link; |
672 | 634 | ||
673 | if (aspm_disabled || !pdev->is_pcie || !parent || !link_state) | 635 | if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state) |
674 | return; | 636 | return; |
675 | if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 637 | if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
676 | parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 638 | (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) |
677 | return; | 639 | return; |
640 | |||
678 | down_read(&pci_bus_sem); | 641 | down_read(&pci_bus_sem); |
679 | mutex_lock(&aspm_lock); | 642 | mutex_lock(&aspm_lock); |
680 | |||
681 | /* | 643 | /* |
682 | * All PCIe functions are in one slot, removing one function will remove | 644 | * All PCIe functions are in one slot, removing one function will remove |
683 | * the whole slot, so just wait until we are the last function left. | 645 | * the whole slot, so just wait until we are the last function left. |
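pcie_update_aspm_capable(), added in the hunk above, restores aspm_capable to aspm_support for every link sharing the given root and then re-runs the endpoint latency check below those links; it is invoked on device removal and on PM state changes, since either can change the latency budget. A toy model of that restore-then-recheck shape; the data structures are invented:

    #include <stdio.h>

    #define NLINKS 3

    struct toy_link {
        int root_id;
        unsigned int support, capable;
    };

    /* stand-in for pcie_aspm_check_latency() */
    static void recheck(struct toy_link *l, int endpoint_ok)
    {
        if (!endpoint_ok)
            l->capable &= ~0x4;     /* pretend L1 no longer fits */
    }

    static void update_capable(struct toy_link links[], int n, int root_id,
                               int endpoint_ok)
    {
        int i;

        for (i = 0; i < n; i++)     /* first restore full support */
            if (links[i].root_id == root_id)
                links[i].capable = links[i].support;
        for (i = 0; i < n; i++)     /* then re-apply latency limits */
            if (links[i].root_id == root_id)
                recheck(&links[i], endpoint_ok);
    }

    int main(void)
    {
        struct toy_link links[NLINKS] = {
            { 0, 0x7, 0x3 }, { 0, 0x7, 0x7 }, { 1, 0x7, 0x7 },
        };

        update_capable(links, NLINKS, 0, 0);
        printf("link0 capable=0x%x link1=0x%x link2=0x%x\n",
               links[0].capable, links[1].capable, links[2].capable);
        return 0;
    }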
@@ -685,13 +647,20 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
685 | if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) | 647 | if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) |
686 | goto out; | 648 | goto out; |
687 | 649 | ||
650 | link = parent->link_state; | ||
651 | root = link->root; | ||
652 | parent_link = link->parent; | ||
653 | |||
688 | /* All functions are removed, so just disable ASPM for the link */ | 654 | /* All functions are removed, so just disable ASPM for the link */ |
689 | __pcie_aspm_config_one_dev(parent, 0); | 655 | pcie_config_aspm_link(link, 0); |
690 | list_del(&link_state->sibling); | 656 | list_del(&link->sibling); |
691 | list_del(&link_state->link); | 657 | list_del(&link->link); |
692 | /* Clock PM is for endpoint device */ | 658 | /* Clock PM is for endpoint device */ |
659 | free_link_state(link); | ||
693 | 660 | ||
694 | free_link_state(link_state); | 661 | /* Recheck latencies and configure upstream links */ |
662 | pcie_update_aspm_capable(root); | ||
663 | pcie_config_aspm_path(parent_link); | ||
695 | out: | 664 | out: |
696 | mutex_unlock(&aspm_lock); | 665 | mutex_unlock(&aspm_lock); |
697 | up_read(&pci_bus_sem); | 666 | up_read(&pci_bus_sem); |
@@ -700,18 +669,23 @@ out: | |||
700 | /* @pdev: the root port or switch downstream port */ | 669 | /* @pdev: the root port or switch downstream port */ |
701 | void pcie_aspm_pm_state_change(struct pci_dev *pdev) | 670 | void pcie_aspm_pm_state_change(struct pci_dev *pdev) |
702 | { | 671 | { |
703 | struct pcie_link_state *link_state = pdev->link_state; | 672 | struct pcie_link_state *link = pdev->link_state; |
704 | 673 | ||
705 | if (aspm_disabled || !pdev->is_pcie || !pdev->link_state) | 674 | if (aspm_disabled || !pdev->is_pcie || !link) |
706 | return; | 675 | return; |
707 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 676 | if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
708 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 677 | (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) |
709 | return; | 678 | return; |
710 | /* | 679 | /* |
711 | * devices changed PM state, we should recheck if latency meets all | 680 | * Devices changed PM state, we should recheck if latency |
712 | * functions' requirement | 681 | * meets all functions' requirement |
713 | */ | 682 | */ |
714 | pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); | 683 | down_read(&pci_bus_sem); |
684 | mutex_lock(&aspm_lock); | ||
685 | pcie_update_aspm_capable(link->root); | ||
686 | pcie_config_aspm_path(link); | ||
687 | mutex_unlock(&aspm_lock); | ||
688 | up_read(&pci_bus_sem); | ||
715 | } | 689 | } |
716 | 690 | ||
717 | /* | 691 | /* |
@@ -721,7 +695,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) | |||
721 | void pci_disable_link_state(struct pci_dev *pdev, int state) | 695 | void pci_disable_link_state(struct pci_dev *pdev, int state) |
722 | { | 696 | { |
723 | struct pci_dev *parent = pdev->bus->self; | 697 | struct pci_dev *parent = pdev->bus->self; |
724 | struct pcie_link_state *link_state; | 698 | struct pcie_link_state *link; |
725 | 699 | ||
726 | if (aspm_disabled || !pdev->is_pcie) | 700 | if (aspm_disabled || !pdev->is_pcie) |
727 | return; | 701 | return; |
@@ -733,12 +707,16 @@ void pci_disable_link_state(struct pci_dev *pdev, int state) | |||
733 | 707 | ||
734 | down_read(&pci_bus_sem); | 708 | down_read(&pci_bus_sem); |
735 | mutex_lock(&aspm_lock); | 709 | mutex_lock(&aspm_lock); |
736 | link_state = parent->link_state; | 710 | link = parent->link_state; |
737 | link_state->aspm_support &= ~state; | 711 | if (state & PCIE_LINK_STATE_L0S) |
738 | __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); | 712 | link->aspm_disable |= ASPM_STATE_L0S; |
713 | if (state & PCIE_LINK_STATE_L1) | ||
714 | link->aspm_disable |= ASPM_STATE_L1; | ||
715 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); | ||
716 | |||
739 | if (state & PCIE_LINK_STATE_CLKPM) { | 717 | if (state & PCIE_LINK_STATE_CLKPM) { |
740 | link_state->clkpm_capable = 0; | 718 | link->clkpm_capable = 0; |
741 | pcie_set_clkpm(link_state, 0); | 719 | pcie_set_clkpm(link, 0); |
742 | } | 720 | } |
743 | mutex_unlock(&aspm_lock); | 721 | mutex_unlock(&aspm_lock); |
744 | up_read(&pci_bus_sem); | 722 | up_read(&pci_bus_sem); |
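Note: the hunk above changes pci_disable_link_state() to record the request in the new aspm_disable mask instead of clearing aspm_support, so the disable now survives later policy changes. Caller-side usage is unchanged; a minimal sketch (the function and driver context are illustrative, the API and flags are the exported ones shown above):

#include <linux/pci.h>

/* Keep ASPM and Clock PM off on the link above a latency-sensitive device. */
static void example_disable_aspm(struct pci_dev *pdev)
{
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);
}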
@@ -748,7 +726,7 @@ EXPORT_SYMBOL(pci_disable_link_state); | |||
748 | static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | 726 | static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) |
749 | { | 727 | { |
750 | int i; | 728 | int i; |
751 | struct pcie_link_state *link_state; | 729 | struct pcie_link_state *link; |
752 | 730 | ||
753 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) | 731 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
754 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) | 732 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) |
@@ -761,10 +739,9 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | |||
761 | down_read(&pci_bus_sem); | 739 | down_read(&pci_bus_sem); |
762 | mutex_lock(&aspm_lock); | 740 | mutex_lock(&aspm_lock); |
763 | aspm_policy = i; | 741 | aspm_policy = i; |
764 | list_for_each_entry(link_state, &link_list, sibling) { | 742 | list_for_each_entry(link, &link_list, sibling) { |
765 | __pcie_aspm_configure_link_state(link_state, | 743 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); |
766 | policy_to_aspm_state(link_state)); | 744 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
767 | pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state)); | ||
768 | } | 745 | } |
769 | mutex_unlock(&aspm_lock); | 746 | mutex_unlock(&aspm_lock); |
770 | up_read(&pci_bus_sem); | 747 | up_read(&pci_bus_sem); |
@@ -802,18 +779,28 @@ static ssize_t link_state_store(struct device *dev, | |||
802 | size_t n) | 779 | size_t n) |
803 | { | 780 | { |
804 | struct pci_dev *pdev = to_pci_dev(dev); | 781 | struct pci_dev *pdev = to_pci_dev(dev); |
805 | int state; | 782 | struct pcie_link_state *link, *root = pdev->link_state->root; |
783 | u32 val = buf[0] - '0', state = 0; | ||
806 | 784 | ||
807 | if (n < 1) | 785 | if (n < 1 || val > 3) |
808 | return -EINVAL; | 786 | return -EINVAL; |
809 | state = buf[0]-'0'; | ||
810 | if (state >= 0 && state <= 3) { | ||
811 | /* setup link aspm state */ | ||
812 | pcie_aspm_configure_link_state(pdev->link_state, state); | ||
813 | return n; | ||
814 | } | ||
815 | 787 | ||
816 | return -EINVAL; | 788 | /* Convert requested state to ASPM state */ |
789 | if (val & PCIE_LINK_STATE_L0S) | ||
790 | state |= ASPM_STATE_L0S; | ||
791 | if (val & PCIE_LINK_STATE_L1) | ||
792 | state |= ASPM_STATE_L1; | ||
793 | |||
794 | down_read(&pci_bus_sem); | ||
795 | mutex_lock(&aspm_lock); | ||
796 | list_for_each_entry(link, &link_list, sibling) { | ||
797 | if (link->root != root) | ||
798 | continue; | ||
799 | pcie_config_aspm_link(link, state); | ||
800 | } | ||
801 | mutex_unlock(&aspm_lock); | ||
802 | up_read(&pci_bus_sem); | ||
803 | return n; | ||
817 | } | 804 | } |
818 | 805 | ||
819 | static ssize_t clk_ctl_show(struct device *dev, | 806 | static ssize_t clk_ctl_show(struct device *dev, |
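Note: link_state_store() now validates the written value, converts it to the internal ASPM_STATE_* bits and applies the setting to every link under the same root, instead of going through the old pcie_aspm_configure_link_state() helper. A userspace sketch of exercising it (the device address is an example, and the attribute is only registered when the ASPM debug sysfs support is built in):

#include <stdio.h>

int main(void)
{
	/* Request L1 only: bit 1 corresponds to PCIE_LINK_STATE_L1. */
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/link_state", "w");

	if (!f)
		return 1;
	fputs("2", f);
	return fclose(f) ? 1 : 0;
}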
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 13ffdc35ea0e..52f84fca9f7d 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -187,14 +187,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
187 | */ | 187 | */ |
188 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | 188 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) |
189 | { | 189 | { |
190 | struct pcie_port_data *port_data = pci_get_drvdata(dev); | ||
191 | int irq, interrupt_mode = PCIE_PORT_NO_IRQ; | 190 | int irq, interrupt_mode = PCIE_PORT_NO_IRQ; |
192 | int i; | 191 | int i; |
193 | 192 | ||
194 | /* Check MSI quirk */ | ||
195 | if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk) | ||
196 | goto Fallback; | ||
197 | |||
198 | /* Try to use MSI-X if supported */ | 193 | /* Try to use MSI-X if supported */ |
199 | if (!pcie_port_enable_msix(dev, vectors, mask)) | 194 | if (!pcie_port_enable_msix(dev, vectors, mask)) |
200 | return PCIE_PORT_MSIX_MODE; | 195 | return PCIE_PORT_MSIX_MODE; |
@@ -203,7 +198,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | |||
203 | if (!pci_enable_msi(dev)) | 198 | if (!pci_enable_msi(dev)) |
204 | interrupt_mode = PCIE_PORT_MSI_MODE; | 199 | interrupt_mode = PCIE_PORT_MSI_MODE; |
205 | 200 | ||
206 | Fallback: | ||
207 | if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) | 201 | if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) |
208 | interrupt_mode = PCIE_PORT_INTx_MODE; | 202 | interrupt_mode = PCIE_PORT_INTx_MODE; |
209 | 203 | ||
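Note: with the Intel MCH quirk now expressed per device (the quirks.c hunk below sets pdev->no_msi), assign_interrupt_mode() no longer needs the pcie_mch_quirk escape hatch: pci_enable_msi() simply fails on a quirked port and the code falls back to legacy INTx. The remaining fallback order, condensed (the real function also distributes the returned vectors):

static int interrupt_mode_sketch(struct pci_dev *dev, int *vectors, int mask)
{
	if (!pcie_port_enable_msix(dev, vectors, mask))
		return PCIE_PORT_MSIX_MODE;	/* MSI-X first */
	if (!pci_enable_msi(dev))
		return PCIE_PORT_MSI_MODE;	/* then plain MSI */
	if (dev->pin)
		return PCIE_PORT_INTx_MODE;	/* legacy interrupt pin */
	return PCIE_PORT_NO_IRQ;
}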
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 091ce70051e0..6df5c984a791 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -205,6 +205,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) | |||
205 | 205 | ||
206 | /* If fatal, restore cfg space for possible link reset at upstream */ | 206 | /* If fatal, restore cfg space for possible link reset at upstream */ |
207 | if (dev->error_state == pci_channel_io_frozen) { | 207 | if (dev->error_state == pci_channel_io_frozen) { |
208 | dev->state_saved = true; | ||
208 | pci_restore_state(dev); | 209 | pci_restore_state(dev); |
209 | pcie_portdrv_restore_config(dev); | 210 | pcie_portdrv_restore_config(dev); |
210 | pci_enable_pcie_error_reporting(dev); | 211 | pci_enable_pcie_error_reporting(dev); |
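Note: pcie_portdrv_slot_reset() sets dev->state_saved before restoring because, elsewhere in this series, the flag is cleared once a restore has been performed (and on device add, as the probe.c hunk below shows); the port driver re-asserts it so the configuration space saved at probe time is written back. The resulting contract, roughly (flow only, error handling omitted):

static void save_restore_sketch(struct pci_dev *dev)
{
	pci_save_state(dev);		/* snapshot config space, flag set */
	/* ... fatal error, upstream link reset ... */
	dev->state_saved = true;	/* the saved copy is still valid */
	pci_restore_state(dev);		/* write it back; flag cleared again */
}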
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 40e75f6a5056..8105e32117f6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -235,7 +235,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
235 | res->start = l64; | 235 | res->start = l64; |
236 | res->end = l64 + sz64; | 236 | res->end = l64 + sz64; |
237 | dev_printk(KERN_DEBUG, &dev->dev, | 237 | dev_printk(KERN_DEBUG, &dev->dev, |
238 | "reg %x 64bit mmio: %pR\n", pos, res); | 238 | "reg %x %s: %pR\n", pos, |
239 | (res->flags & IORESOURCE_PREFETCH) ? | ||
240 | "64bit mmio pref" : "64bit mmio", | ||
241 | res); | ||
239 | } | 242 | } |
240 | 243 | ||
241 | res->flags |= IORESOURCE_MEM_64; | 244 | res->flags |= IORESOURCE_MEM_64; |
@@ -249,7 +252,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
249 | res->end = l + sz; | 252 | res->end = l + sz; |
250 | 253 | ||
251 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, | 254 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, |
252 | (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio", | 255 | (res->flags & IORESOURCE_IO) ? "io port" : |
256 | ((res->flags & IORESOURCE_PREFETCH) ? | ||
257 | "32bit mmio pref" : "32bit mmio"), | ||
253 | res); | 258 | res); |
254 | } | 259 | } |
255 | 260 | ||
@@ -692,6 +697,23 @@ static void set_pcie_port_type(struct pci_dev *pdev) | |||
692 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 697 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
693 | } | 698 | } |
694 | 699 | ||
700 | static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | ||
701 | { | ||
702 | int pos; | ||
703 | u16 reg16; | ||
704 | u32 reg32; | ||
705 | |||
706 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
707 | if (!pos) | ||
708 | return; | ||
709 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | ||
710 | if (!(reg16 & PCI_EXP_FLAGS_SLOT)) | ||
711 | return; | ||
712 | pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, ®32); | ||
713 | if (reg32 & PCI_EXP_SLTCAP_HPC) | ||
714 | pdev->is_hotplug_bridge = 1; | ||
715 | } | ||
716 | |||
695 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 717 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
696 | 718 | ||
697 | /** | 719 | /** |
@@ -799,6 +821,7 @@ int pci_setup_device(struct pci_dev *dev) | |||
799 | pci_read_irq(dev); | 821 | pci_read_irq(dev); |
800 | dev->transparent = ((dev->class & 0xff) == 1); | 822 | dev->transparent = ((dev->class & 0xff) == 1); |
801 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); | 823 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
824 | set_pcie_hotplug_bridge(dev); | ||
802 | break; | 825 | break; |
803 | 826 | ||
804 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ | 827 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
@@ -1009,6 +1032,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
1009 | /* Fix up broken headers */ | 1032 | /* Fix up broken headers */ |
1010 | pci_fixup_device(pci_fixup_header, dev); | 1033 | pci_fixup_device(pci_fixup_header, dev); |
1011 | 1034 | ||
1035 | /* Clear the state_saved flag. */ | ||
1036 | dev->state_saved = false; | ||
1037 | |||
1012 | /* Initialize various capabilities */ | 1038 | /* Initialize various capabilities */ |
1013 | pci_init_capabilities(dev); | 1039 | pci_init_capabilities(dev); |
1014 | 1040 | ||
@@ -1061,8 +1087,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1061 | if (dev && !dev->is_added) /* new device? */ | 1087 | if (dev && !dev->is_added) /* new device? */ |
1062 | nr++; | 1088 | nr++; |
1063 | 1089 | ||
1064 | if ((dev && dev->multifunction) || | 1090 | if (dev && dev->multifunction) { |
1065 | (!dev && pcibios_scan_all_fns(bus, devfn))) { | ||
1066 | for (fn = 1; fn < 8; fn++) { | 1091 | for (fn = 1; fn < 8; fn++) { |
1067 | dev = pci_scan_single_device(bus, devfn + fn); | 1092 | dev = pci_scan_single_device(bus, devfn + fn); |
1068 | if (dev) { | 1093 | if (dev) { |
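Note: set_pcie_hotplug_bridge() flags bridges whose Slot Capabilities register advertises hot-plug support (PCI_EXP_SLTCAP_HPC); the flag feeds the resource sizing change in setup-bus.c below. The pci_scan_slot() hunk also drops the pcibios_scan_all_fns() special case, so functions 1-7 are only probed when function 0 reports itself as multifunction. A tiny consumer sketch of the new flag (the helper name is made up):

/* Would this bus benefit from a hotplug resource reservation? */
static bool wants_hotplug_reservation(struct pci_bus *bus)
{
	return bus->self && bus->self->is_hotplug_bridge;
}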
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 85ce23997be4..6099facecd79 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -31,8 +31,6 @@ int isa_dma_bridge_buggy; | |||
31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | 31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); |
32 | int pci_pci_problems; | 32 | int pci_pci_problems; |
33 | EXPORT_SYMBOL(pci_pci_problems); | 33 | EXPORT_SYMBOL(pci_pci_problems); |
34 | int pcie_mch_quirk; | ||
35 | EXPORT_SYMBOL(pcie_mch_quirk); | ||
36 | 34 | ||
37 | #ifdef CONFIG_PCI_QUIRKS | 35 | #ifdef CONFIG_PCI_QUIRKS |
38 | /* | 36 | /* |
@@ -1203,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) | |||
1203 | switch(dev->subsystem_device) { | 1201 | switch(dev->subsystem_device) { |
1204 | case 0x00b8: /* Compaq Evo D510 CMT */ | 1202 | case 0x00b8: /* Compaq Evo D510 CMT */ |
1205 | case 0x00b9: /* Compaq Evo D510 SFF */ | 1203 | case 0x00b9: /* Compaq Evo D510 SFF */ |
1204 | case 0x00ba: /* Compaq Evo D510 USDT */ | ||
1206 | /* Motherboard doesn't have Host bridge | 1205 | /* Motherboard doesn't have Host bridge |
1207 | * subvendor/subdevice IDs and on-board VGA | 1206 | * subvendor/subdevice IDs and on-board VGA |
1208 | * controller is disabled if an AGP card is | 1207 | * controller is disabled if an AGP card is |
@@ -1501,7 +1500,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a | |||
1501 | 1500 | ||
1502 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) | 1501 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) |
1503 | { | 1502 | { |
1504 | pcie_mch_quirk = 1; | 1503 | pci_msi_off(pdev); |
1504 | pdev->no_msi = 1; | ||
1505 | } | 1505 | } |
1506 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); | 1506 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); |
1507 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); | 1507 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); |
@@ -1569,10 +1569,8 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) | |||
1569 | return; | 1569 | return; |
1570 | 1570 | ||
1571 | dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; | 1571 | dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; |
1572 | 1572 | dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n", | |
1573 | printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", | 1573 | dev->vendor, dev->device); |
1574 | dev->vendor, dev->device); | ||
1575 | return; | ||
1576 | } | 1574 | } |
1577 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); | 1575 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); |
1578 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); | 1576 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); |
@@ -1614,8 +1612,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) | |||
1614 | pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; | 1612 | pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; |
1615 | pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); | 1613 | pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); |
1616 | 1614 | ||
1617 | printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", | 1615 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1618 | dev->vendor, dev->device); | 1616 | dev->vendor, dev->device); |
1619 | } | 1617 | } |
1620 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | 1618 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); |
1621 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | 1619 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); |
@@ -1647,8 +1645,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) | |||
1647 | 1645 | ||
1648 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); | 1646 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); |
1649 | 1647 | ||
1650 | printk(KERN_INFO "disabled boot interrupts on PCI device" | 1648 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1651 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | 1649 | dev->vendor, dev->device); |
1652 | } | 1650 | } |
1653 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | 1651 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); |
1654 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | 1652 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); |
@@ -1678,8 +1676,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | |||
1678 | pci_config_dword &= ~AMD_813X_NOIOAMODE; | 1676 | pci_config_dword &= ~AMD_813X_NOIOAMODE; |
1679 | pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); | 1677 | pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); |
1680 | 1678 | ||
1681 | printk(KERN_INFO "disabled boot interrupts on PCI device " | 1679 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1682 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | 1680 | dev->vendor, dev->device); |
1683 | } | 1681 | } |
1684 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | 1682 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); |
1685 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | 1683 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); |
@@ -1695,14 +1693,13 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) | |||
1695 | 1693 | ||
1696 | pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); | 1694 | pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); |
1697 | if (!pci_config_word) { | 1695 | if (!pci_config_word) { |
1698 | printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " | 1696 | dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] " |
1699 | "already disabled\n", | 1697 | "already disabled\n", dev->vendor, dev->device); |
1700 | dev->vendor, dev->device); | ||
1701 | return; | 1698 | return; |
1702 | } | 1699 | } |
1703 | pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); | 1700 | pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); |
1704 | printk(KERN_INFO "disabled boot interrupts on PCI device " | 1701 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
1705 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | 1702 | dev->vendor, dev->device); |
1706 | } | 1703 | } |
1707 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | 1704 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); |
1708 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | 1705 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); |
@@ -2384,8 +2381,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev) | |||
2384 | } | 2381 | } |
2385 | 2382 | ||
2386 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); | 2383 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); |
2384 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); | ||
2387 | 2385 | ||
2388 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); | 2386 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); |
2387 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); | ||
2389 | 2388 | ||
2390 | static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) | 2389 | static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) |
2391 | { | 2390 | { |
@@ -2494,6 +2493,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); | |||
2494 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); | 2493 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); |
2495 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); | 2494 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); |
2496 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); | 2495 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); |
2496 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); | ||
2497 | 2497 | ||
2498 | #endif /* CONFIG_PCI_IOV */ | 2498 | #endif /* CONFIG_PCI_IOV */ |
2499 | 2499 | ||
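Note: the quirk changes above replace raw printk() calls with dev_info(), which prefixes the PCI address automatically, and quirk_pcie_mch() now disables MSI on the affected MCH ports directly (pci_msi_off() plus pdev->no_msi) rather than setting the removed pcie_mch_quirk global. A quirk written in the same style might look like this (the device ID 0xabcd is purely hypothetical):

static void __devinit quirk_example_no_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;	/* same per-device mechanism as quirk_pcie_mch() */
	dev_info(&dev->dev, "disabled MSI on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0xabcd, quirk_example_no_msi);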
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index e8cb5051c311..ec415352d9ba 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -113,37 +113,6 @@ pci_find_next_bus(const struct pci_bus *from) | |||
113 | return b; | 113 | return b; |
114 | } | 114 | } |
115 | 115 | ||
116 | #ifdef CONFIG_PCI_LEGACY | ||
117 | /** | ||
118 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
119 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
120 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
121 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
122 | * | ||
123 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
124 | * with a matching @vendor and @device, a pointer to its device structure is | ||
125 | * returned. Otherwise, %NULL is returned. | ||
126 | * A new search is initiated by passing %NULL as the @from argument. | ||
127 | * Otherwise if @from is not %NULL, searches continue from next device | ||
128 | * on the global list. | ||
129 | * | ||
130 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
131 | * the PCI device returned by this function can disappear at any moment in | ||
132 | * time. | ||
133 | */ | ||
134 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
135 | struct pci_dev *from) | ||
136 | { | ||
137 | struct pci_dev *pdev; | ||
138 | |||
139 | pci_dev_get(from); | ||
140 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
141 | pci_dev_put(pdev); | ||
142 | return pdev; | ||
143 | } | ||
144 | EXPORT_SYMBOL(pci_find_device); | ||
145 | #endif /* CONFIG_PCI_LEGACY */ | ||
146 | |||
147 | /** | 116 | /** |
148 | * pci_get_slot - locate PCI device for a given PCI slot | 117 | * pci_get_slot - locate PCI device for a given PCI slot |
149 | * @bus: PCI bus on which desired PCI device resides | 118 | * @bus: PCI bus on which desired PCI device resides |
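Note: pci_find_device() leaves search.c; as its removed kerneldoc already warns, callers should use pci_get_device(), which holds a reference on the device it returns. A migration sketch with placeholder vendor/device IDs:

#include <linux/pci.h>

static struct pci_dev *find_my_device(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(0x1234, 0x5678, pdev)) != NULL) {
		if (pdev->revision >= 0x10)	/* pick the variant we support */
			return pdev;		/* reference held; caller must
						   pci_dev_put() when done */
	}
	return NULL;
}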
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 7c443b4583ab..cb1a027eb552 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -309,7 +309,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon | |||
309 | since these windows have 4K granularity and the IO ranges | 309 | since these windows have 4K granularity and the IO ranges |
310 | of non-bridge PCI devices are limited to 256 bytes. | 310 | of non-bridge PCI devices are limited to 256 bytes. |
311 | We must be careful with the ISA aliasing though. */ | 311 | We must be careful with the ISA aliasing though. */ |
312 | static void pbus_size_io(struct pci_bus *bus) | 312 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) |
313 | { | 313 | { |
314 | struct pci_dev *dev; | 314 | struct pci_dev *dev; |
315 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 315 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
@@ -336,6 +336,8 @@ static void pbus_size_io(struct pci_bus *bus) | |||
336 | size1 += r_size; | 336 | size1 += r_size; |
337 | } | 337 | } |
338 | } | 338 | } |
339 | if (size < min_size) | ||
340 | size = min_size; | ||
339 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | 341 | /* To be fixed in 2.5: we should have sort of HAVE_ISA |
340 | flag in the struct pci_bus. */ | 342 | flag in the struct pci_bus. */ |
341 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 343 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
@@ -354,7 +356,8 @@ static void pbus_size_io(struct pci_bus *bus) | |||
354 | 356 | ||
355 | /* Calculate the size of the bus and minimal alignment which | 357 | /* Calculate the size of the bus and minimal alignment which |
356 | guarantees that all child resources fit in this size. */ | 358 | guarantees that all child resources fit in this size. */ |
357 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type) | 359 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
360 | unsigned long type, resource_size_t min_size) | ||
358 | { | 361 | { |
359 | struct pci_dev *dev; | 362 | struct pci_dev *dev; |
360 | resource_size_t min_align, align, size; | 363 | resource_size_t min_align, align, size; |
@@ -404,6 +407,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long | |||
404 | mem64_mask &= r->flags & IORESOURCE_MEM_64; | 407 | mem64_mask &= r->flags & IORESOURCE_MEM_64; |
405 | } | 408 | } |
406 | } | 409 | } |
410 | if (size < min_size) | ||
411 | size = min_size; | ||
407 | 412 | ||
408 | align = 0; | 413 | align = 0; |
409 | min_align = 0; | 414 | min_align = 0; |
@@ -483,6 +488,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
483 | { | 488 | { |
484 | struct pci_dev *dev; | 489 | struct pci_dev *dev; |
485 | unsigned long mask, prefmask; | 490 | unsigned long mask, prefmask; |
491 | resource_size_t min_mem_size = 0, min_io_size = 0; | ||
486 | 492 | ||
487 | list_for_each_entry(dev, &bus->devices, bus_list) { | 493 | list_for_each_entry(dev, &bus->devices, bus_list) { |
488 | struct pci_bus *b = dev->subordinate; | 494 | struct pci_bus *b = dev->subordinate; |
@@ -512,8 +518,12 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
512 | 518 | ||
513 | case PCI_CLASS_BRIDGE_PCI: | 519 | case PCI_CLASS_BRIDGE_PCI: |
514 | pci_bridge_check_ranges(bus); | 520 | pci_bridge_check_ranges(bus); |
521 | if (bus->self->is_hotplug_bridge) { | ||
522 | min_io_size = pci_hotplug_io_size; | ||
523 | min_mem_size = pci_hotplug_mem_size; | ||
524 | } | ||
515 | default: | 525 | default: |
516 | pbus_size_io(bus); | 526 | pbus_size_io(bus, min_io_size); |
517 | /* If the bridge supports prefetchable range, size it | 527 | /* If the bridge supports prefetchable range, size it |
518 | separately. If it doesn't, or its prefetchable window | 528 | separately. If it doesn't, or its prefetchable window |
519 | has already been allocated by arch code, try | 529 | has already been allocated by arch code, try |
@@ -521,9 +531,11 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
521 | resources. */ | 531 | resources. */ |
522 | mask = IORESOURCE_MEM; | 532 | mask = IORESOURCE_MEM; |
523 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; | 533 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; |
524 | if (pbus_size_mem(bus, prefmask, prefmask)) | 534 | if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size)) |
525 | mask = prefmask; /* Success, size non-prefetch only. */ | 535 | mask = prefmask; /* Success, size non-prefetch only. */ |
526 | pbus_size_mem(bus, mask, IORESOURCE_MEM); | 536 | else |
537 | min_mem_size += min_mem_size; | ||
538 | pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size); | ||
527 | break; | 539 | break; |
528 | } | 540 | } |
529 | } | 541 | } |
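Note: pbus_size_io()/pbus_size_mem() gain a minimum window size that is non-zero only for bridges flagged is_hotplug_bridge; pci_hotplug_io_size and pci_hotplug_mem_size are new globals whose defaults (and any command-line override) are defined outside this diff. The effect of the new argument, in isolation:

/* A hotplug bridge's window never shrinks below the reservation,
 * even while the slot is still empty. */
static resource_size_t clamp_window(resource_size_t computed,
				    resource_size_t min_size)
{
	return computed < min_size ? min_size : computed;
}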
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 88cdd1a937d6..706f82d8111f 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource) | |||
119 | 119 | ||
120 | return err; | 120 | return err; |
121 | } | 121 | } |
122 | EXPORT_SYMBOL(pci_claim_resource); | ||
122 | 123 | ||
123 | #ifdef CONFIG_PCI_QUIRKS | 124 | #ifdef CONFIG_PCI_QUIRKS |
124 | void pci_disable_bridge_window(struct pci_dev *dev) | 125 | void pci_disable_bridge_window(struct pci_dev *dev) |