Diffstat (limited to 'drivers')
33 files changed, 1754 insertions, 485 deletions
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 46f80e2c92f7..6d2c49b86b7f 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -758,9 +758,9 @@ int apei_osc_setup(void) | |||
758 | .cap.pointer = capbuf, | 758 | .cap.pointer = capbuf, |
759 | }; | 759 | }; |
760 | 760 | ||
761 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; | 761 | capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; |
762 | capbuf[OSC_SUPPORT_TYPE] = 1; | 762 | capbuf[OSC_SUPPORT_DWORD] = 1; |
763 | capbuf[OSC_CONTROL_TYPE] = 0; | 763 | capbuf[OSC_CONTROL_DWORD] = 0; |
764 | 764 | ||
765 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) | 765 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) |
766 | || ACPI_FAILURE(acpi_run_osc(handle, &context))) | 766 | || ACPI_FAILURE(acpi_run_osc(handle, &context))) |
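The rename from OSC_*_TYPE to OSC_*_DWORD here (and in the matching hunks in bus.c and pci_root.c below) reflects that these macros are indices into the _OSC capabilities buffer, which is an array of DWORDs. A minimal sketch of that layout, illustrative only and not part of the patch (supported_features and requested_controls are placeholder names):

	u32 capbuf[3];

	capbuf[OSC_QUERY_DWORD]   = OSC_QUERY_ENABLE;    /* DWORD 0: query flag */
	capbuf[OSC_SUPPORT_DWORD] = supported_features;  /* DWORD 1: features the OS supports */
	capbuf[OSC_CONTROL_DWORD] = requested_controls;  /* DWORD 2: controls the OS requests */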
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e1bd9a181117..bba9b72e25f8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -256,7 +256,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context) | |||
256 | acpi_print_osc_error(handle, context, | 256 | acpi_print_osc_error(handle, context, |
257 | "_OSC invalid revision"); | 257 | "_OSC invalid revision"); |
258 | if (errors & OSC_CAPABILITIES_MASK_ERROR) { | 258 | if (errors & OSC_CAPABILITIES_MASK_ERROR) { |
259 | if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE] | 259 | if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD] |
260 | & OSC_QUERY_ENABLE) | 260 | & OSC_QUERY_ENABLE) |
261 | goto out_success; | 261 | goto out_success; |
262 | status = AE_SUPPORT; | 262 | status = AE_SUPPORT; |
@@ -296,30 +296,30 @@ static void acpi_bus_osc_support(void) | |||
296 | }; | 296 | }; |
297 | acpi_handle handle; | 297 | acpi_handle handle; |
298 | 298 | ||
299 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; | 299 | capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; |
300 | capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ | 300 | capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ |
301 | #if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ | 301 | #if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ |
302 | defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) | 302 | defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) |
303 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT; | 303 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; |
304 | #endif | 304 | #endif |
305 | 305 | ||
306 | #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) | 306 | #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) |
307 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; | 307 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; |
308 | #endif | 308 | #endif |
309 | 309 | ||
310 | #ifdef ACPI_HOTPLUG_OST | 310 | #ifdef ACPI_HOTPLUG_OST |
311 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT; | 311 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; |
312 | #endif | 312 | #endif |
313 | 313 | ||
314 | if (!ghes_disable) | 314 | if (!ghes_disable) |
315 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT; | 315 | capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT; |
316 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) | 316 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) |
317 | return; | 317 | return; |
318 | if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { | 318 | if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { |
319 | u32 *capbuf_ret = context.ret.pointer; | 319 | u32 *capbuf_ret = context.ret.pointer; |
320 | if (context.ret.length > OSC_SUPPORT_TYPE) | 320 | if (context.ret.length > OSC_SUPPORT_DWORD) |
321 | osc_sb_apei_support_acked = | 321 | osc_sb_apei_support_acked = |
322 | capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT; | 322 | capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; |
323 | kfree(context.ret.pointer); | 323 | kfree(context.ret.pointer); |
324 | } | 324 | } |
325 | /* do we need to check other returned cap? Sounds no */ | 325 | /* do we need to check other returned cap? Sounds no */ |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 417876bce854..56f05869b08d 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -51,10 +51,10 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
51 | const struct acpi_device_id *not_used); | 51 | const struct acpi_device_id *not_used); |
52 | static void acpi_pci_root_remove(struct acpi_device *device); | 52 | static void acpi_pci_root_remove(struct acpi_device *device); |
53 | 53 | ||
54 | #define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \ | 54 | #define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \ |
55 | | OSC_ACTIVE_STATE_PWR_SUPPORT \ | 55 | | OSC_PCI_ASPM_SUPPORT \ |
56 | | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \ | 56 | | OSC_PCI_CLOCK_PM_SUPPORT \ |
57 | | OSC_MSI_SUPPORT) | 57 | | OSC_PCI_MSI_SUPPORT) |
58 | 58 | ||
59 | static const struct acpi_device_id root_device_ids[] = { | 59 | static const struct acpi_device_id root_device_ids[] = { |
60 | {"PNP0A03", 0}, | 60 | {"PNP0A03", 0}, |
@@ -129,6 +129,55 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle, | |||
129 | return AE_OK; | 129 | return AE_OK; |
130 | } | 130 | } |
131 | 131 | ||
132 | struct pci_osc_bit_struct { | ||
133 | u32 bit; | ||
134 | char *desc; | ||
135 | }; | ||
136 | |||
137 | static struct pci_osc_bit_struct pci_osc_support_bit[] = { | ||
138 | { OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" }, | ||
139 | { OSC_PCI_ASPM_SUPPORT, "ASPM" }, | ||
140 | { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" }, | ||
141 | { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" }, | ||
142 | { OSC_PCI_MSI_SUPPORT, "MSI" }, | ||
143 | }; | ||
144 | |||
145 | static struct pci_osc_bit_struct pci_osc_control_bit[] = { | ||
146 | { OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" }, | ||
147 | { OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" }, | ||
148 | { OSC_PCI_EXPRESS_PME_CONTROL, "PME" }, | ||
149 | { OSC_PCI_EXPRESS_AER_CONTROL, "AER" }, | ||
150 | { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" }, | ||
151 | }; | ||
152 | |||
153 | static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, | ||
154 | struct pci_osc_bit_struct *table, int size) | ||
155 | { | ||
156 | char buf[80]; | ||
157 | int i, len = 0; | ||
158 | struct pci_osc_bit_struct *entry; | ||
159 | |||
160 | buf[0] = '\0'; | ||
161 | for (i = 0, entry = table; i < size; i++, entry++) | ||
162 | if (word & entry->bit) | ||
163 | len += snprintf(buf + len, sizeof(buf) - len, "%s%s", | ||
164 | len ? " " : "", entry->desc); | ||
165 | |||
166 | dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf); | ||
167 | } | ||
168 | |||
169 | static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word) | ||
170 | { | ||
171 | decode_osc_bits(root, msg, word, pci_osc_support_bit, | ||
172 | ARRAY_SIZE(pci_osc_support_bit)); | ||
173 | } | ||
174 | |||
175 | static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word) | ||
176 | { | ||
177 | decode_osc_bits(root, msg, word, pci_osc_control_bit, | ||
178 | ARRAY_SIZE(pci_osc_control_bit)); | ||
179 | } | ||
180 | |||
132 | static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766"; | 181 | static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766"; |
133 | 182 | ||
134 | static acpi_status acpi_pci_run_osc(acpi_handle handle, | 183 | static acpi_status acpi_pci_run_osc(acpi_handle handle, |
@@ -160,14 +209,14 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, | |||
160 | support &= OSC_PCI_SUPPORT_MASKS; | 209 | support &= OSC_PCI_SUPPORT_MASKS; |
161 | support |= root->osc_support_set; | 210 | support |= root->osc_support_set; |
162 | 211 | ||
163 | capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; | 212 | capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; |
164 | capbuf[OSC_SUPPORT_TYPE] = support; | 213 | capbuf[OSC_SUPPORT_DWORD] = support; |
165 | if (control) { | 214 | if (control) { |
166 | *control &= OSC_PCI_CONTROL_MASKS; | 215 | *control &= OSC_PCI_CONTROL_MASKS; |
167 | capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; | 216 | capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set; |
168 | } else { | 217 | } else { |
169 | /* Run _OSC query only with existing controls. */ | 218 | /* Run _OSC query only with existing controls. */ |
170 | capbuf[OSC_CONTROL_TYPE] = root->osc_control_set; | 219 | capbuf[OSC_CONTROL_DWORD] = root->osc_control_set; |
171 | } | 220 | } |
172 | 221 | ||
173 | status = acpi_pci_run_osc(root->device->handle, capbuf, &result); | 222 | status = acpi_pci_run_osc(root->device->handle, capbuf, &result); |
@@ -182,11 +231,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, | |||
182 | static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) | 231 | static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) |
183 | { | 232 | { |
184 | acpi_status status; | 233 | acpi_status status; |
185 | acpi_handle tmp; | ||
186 | 234 | ||
187 | status = acpi_get_handle(root->device->handle, "_OSC", &tmp); | ||
188 | if (ACPI_FAILURE(status)) | ||
189 | return status; | ||
190 | mutex_lock(&osc_lock); | 235 | mutex_lock(&osc_lock); |
191 | status = acpi_pci_query_osc(root, flags, NULL); | 236 | status = acpi_pci_query_osc(root, flags, NULL); |
192 | mutex_unlock(&osc_lock); | 237 | mutex_unlock(&osc_lock); |
@@ -318,9 +363,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev); | |||
318 | acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) | 363 | acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) |
319 | { | 364 | { |
320 | struct acpi_pci_root *root; | 365 | struct acpi_pci_root *root; |
321 | acpi_status status; | 366 | acpi_status status = AE_OK; |
322 | u32 ctrl, capbuf[3]; | 367 | u32 ctrl, capbuf[3]; |
323 | acpi_handle tmp; | ||
324 | 368 | ||
325 | if (!mask) | 369 | if (!mask) |
326 | return AE_BAD_PARAMETER; | 370 | return AE_BAD_PARAMETER; |
@@ -333,10 +377,6 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) | |||
333 | if (!root) | 377 | if (!root) |
334 | return AE_NOT_EXIST; | 378 | return AE_NOT_EXIST; |
335 | 379 | ||
336 | status = acpi_get_handle(handle, "_OSC", &tmp); | ||
337 | if (ACPI_FAILURE(status)) | ||
338 | return status; | ||
339 | |||
340 | mutex_lock(&osc_lock); | 380 | mutex_lock(&osc_lock); |
341 | 381 | ||
342 | *mask = ctrl | root->osc_control_set; | 382 | *mask = ctrl | root->osc_control_set; |
@@ -351,17 +391,21 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) | |||
351 | goto out; | 391 | goto out; |
352 | if (ctrl == *mask) | 392 | if (ctrl == *mask) |
353 | break; | 393 | break; |
394 | decode_osc_control(root, "platform does not support", | ||
395 | ctrl & ~(*mask)); | ||
354 | ctrl = *mask; | 396 | ctrl = *mask; |
355 | } | 397 | } |
356 | 398 | ||
357 | if ((ctrl & req) != req) { | 399 | if ((ctrl & req) != req) { |
400 | decode_osc_control(root, "not requesting control; platform does not support", | ||
401 | req & ~(ctrl)); | ||
358 | status = AE_SUPPORT; | 402 | status = AE_SUPPORT; |
359 | goto out; | 403 | goto out; |
360 | } | 404 | } |
361 | 405 | ||
362 | capbuf[OSC_QUERY_TYPE] = 0; | 406 | capbuf[OSC_QUERY_DWORD] = 0; |
363 | capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set; | 407 | capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set; |
364 | capbuf[OSC_CONTROL_TYPE] = ctrl; | 408 | capbuf[OSC_CONTROL_DWORD] = ctrl; |
365 | status = acpi_pci_run_osc(handle, capbuf, mask); | 409 | status = acpi_pci_run_osc(handle, capbuf, mask); |
366 | if (ACPI_SUCCESS(status)) | 410 | if (ACPI_SUCCESS(status)) |
367 | root->osc_control_set = *mask; | 411 | root->osc_control_set = *mask; |
@@ -371,6 +415,87 @@ out: | |||
371 | } | 415 | } |
372 | EXPORT_SYMBOL(acpi_pci_osc_control_set); | 416 | EXPORT_SYMBOL(acpi_pci_osc_control_set); |
373 | 417 | ||
418 | static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, | ||
419 | int *clear_aspm) | ||
420 | { | ||
421 | u32 support, control, requested; | ||
422 | acpi_status status; | ||
423 | struct acpi_device *device = root->device; | ||
424 | acpi_handle handle = device->handle; | ||
425 | |||
426 | /* | ||
427 | * All supported architectures that use ACPI have support for | ||
428 | * PCI domains, so we indicate this in _OSC support capabilities. | ||
429 | */ | ||
430 | support = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | ||
431 | if (pci_ext_cfg_avail()) | ||
432 | support |= OSC_PCI_EXT_CONFIG_SUPPORT; | ||
433 | if (pcie_aspm_support_enabled()) | ||
434 | support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT; | ||
435 | if (pci_msi_enabled()) | ||
436 | support |= OSC_PCI_MSI_SUPPORT; | ||
437 | |||
438 | decode_osc_support(root, "OS supports", support); | ||
439 | status = acpi_pci_osc_support(root, support); | ||
440 | if (ACPI_FAILURE(status)) { | ||
441 | dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n", | ||
442 | acpi_format_exception(status)); | ||
443 | *no_aspm = 1; | ||
444 | return; | ||
445 | } | ||
446 | |||
447 | if (pcie_ports_disabled) { | ||
448 | dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n"); | ||
449 | return; | ||
450 | } | ||
451 | |||
452 | if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) { | ||
453 | decode_osc_support(root, "not requesting OS control; OS requires", | ||
454 | ACPI_PCIE_REQ_SUPPORT); | ||
455 | return; | ||
456 | } | ||
457 | |||
458 | control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL | ||
459 | | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | ||
460 | | OSC_PCI_EXPRESS_PME_CONTROL; | ||
461 | |||
462 | if (pci_aer_available()) { | ||
463 | if (aer_acpi_firmware_first()) | ||
464 | dev_info(&device->dev, | ||
465 | "PCIe AER handled by firmware\n"); | ||
466 | else | ||
467 | control |= OSC_PCI_EXPRESS_AER_CONTROL; | ||
468 | } | ||
469 | |||
470 | requested = control; | ||
471 | status = acpi_pci_osc_control_set(handle, &control, | ||
472 | OSC_PCI_EXPRESS_CAPABILITY_CONTROL); | ||
473 | if (ACPI_SUCCESS(status)) { | ||
474 | decode_osc_control(root, "OS now controls", control); | ||
475 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | ||
476 | /* | ||
477 | * We have ASPM control, but the FADT indicates | ||
478 | * that it's unsupported. Clear it. | ||
479 | */ | ||
480 | *clear_aspm = 1; | ||
481 | } | ||
482 | } else { | ||
483 | decode_osc_control(root, "OS requested", requested); | ||
484 | decode_osc_control(root, "platform willing to grant", control); | ||
485 | dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n", | ||
486 | acpi_format_exception(status)); | ||
487 | |||
488 | /* | ||
489 | * We want to disable ASPM here, but aspm_disabled | ||
490 | * needs to remain in its state from boot so that we | ||
491 | * properly handle PCIe 1.1 devices. So we set this | ||
492 | * flag here, to defer the action until after the ACPI | ||
493 | * root scan. | ||
494 | */ | ||
495 | *no_aspm = 1; | ||
496 | } | ||
497 | } | ||
498 | |||
374 | static int acpi_pci_root_add(struct acpi_device *device, | 499 | static int acpi_pci_root_add(struct acpi_device *device, |
375 | const struct acpi_device_id *not_used) | 500 | const struct acpi_device_id *not_used) |
376 | { | 501 | { |
@@ -378,9 +503,8 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
378 | acpi_status status; | 503 | acpi_status status; |
379 | int result; | 504 | int result; |
380 | struct acpi_pci_root *root; | 505 | struct acpi_pci_root *root; |
381 | u32 flags, base_flags; | ||
382 | acpi_handle handle = device->handle; | 506 | acpi_handle handle = device->handle; |
383 | bool no_aspm = false, clear_aspm = false; | 507 | int no_aspm = 0, clear_aspm = 0; |
384 | 508 | ||
385 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | 509 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); |
386 | if (!root) | 510 | if (!root) |
@@ -433,81 +557,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
433 | 557 | ||
434 | root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle); | 558 | root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle); |
435 | 559 | ||
436 | /* | 560 | negotiate_os_control(root, &no_aspm, &clear_aspm); |
437 | * All supported architectures that use ACPI have support for | ||
438 | * PCI domains, so we indicate this in _OSC support capabilities. | ||
439 | */ | ||
440 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | ||
441 | acpi_pci_osc_support(root, flags); | ||
442 | |||
443 | if (pci_ext_cfg_avail()) | ||
444 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; | ||
445 | if (pcie_aspm_support_enabled()) { | ||
446 | flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | | ||
447 | OSC_CLOCK_PWR_CAPABILITY_SUPPORT; | ||
448 | } | ||
449 | if (pci_msi_enabled()) | ||
450 | flags |= OSC_MSI_SUPPORT; | ||
451 | if (flags != base_flags) { | ||
452 | status = acpi_pci_osc_support(root, flags); | ||
453 | if (ACPI_FAILURE(status)) { | ||
454 | dev_info(&device->dev, "ACPI _OSC support " | ||
455 | "notification failed, disabling PCIe ASPM\n"); | ||
456 | no_aspm = true; | ||
457 | flags = base_flags; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | if (!pcie_ports_disabled | ||
462 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { | ||
463 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | ||
464 | | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | ||
465 | | OSC_PCI_EXPRESS_PME_CONTROL; | ||
466 | |||
467 | if (pci_aer_available()) { | ||
468 | if (aer_acpi_firmware_first()) | ||
469 | dev_dbg(&device->dev, | ||
470 | "PCIe errors handled by BIOS.\n"); | ||
471 | else | ||
472 | flags |= OSC_PCI_EXPRESS_AER_CONTROL; | ||
473 | } | ||
474 | |||
475 | dev_info(&device->dev, | ||
476 | "Requesting ACPI _OSC control (0x%02x)\n", flags); | ||
477 | |||
478 | status = acpi_pci_osc_control_set(handle, &flags, | ||
479 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
480 | if (ACPI_SUCCESS(status)) { | ||
481 | dev_info(&device->dev, | ||
482 | "ACPI _OSC control (0x%02x) granted\n", flags); | ||
483 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | ||
484 | /* | ||
485 | * We have ASPM control, but the FADT indicates | ||
486 | * that it's unsupported. Clear it. | ||
487 | */ | ||
488 | clear_aspm = true; | ||
489 | } | ||
490 | } else { | ||
491 | dev_info(&device->dev, | ||
492 | "ACPI _OSC request failed (%s), " | ||
493 | "returned control mask: 0x%02x\n", | ||
494 | acpi_format_exception(status), flags); | ||
495 | dev_info(&device->dev, | ||
496 | "ACPI _OSC control for PCIe not granted, disabling ASPM\n"); | ||
497 | /* | ||
498 | * We want to disable ASPM here, but aspm_disabled | ||
499 | * needs to remain in its state from boot so that we | ||
500 | * properly handle PCIe 1.1 devices. So we set this | ||
501 | * flag here, to defer the action until after the ACPI | ||
502 | * root scan. | ||
503 | */ | ||
504 | no_aspm = true; | ||
505 | } | ||
506 | } else { | ||
507 | dev_info(&device->dev, | ||
508 | "Unable to request _OSC control " | ||
509 | "(_OSC support mask: 0x%02x)\n", flags); | ||
510 | } | ||
511 | 561 | ||
512 | /* | 562 | /* |
513 | * TBD: Need PCI interface for enumeration/configuration of roots. | 563 | * TBD: Need PCI interface for enumeration/configuration of roots. |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b5c67a99dda9..56f6bec34af5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1174,23 +1174,16 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
1174 | 1174 | ||
1175 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | 1175 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) |
1176 | { | 1176 | { |
1177 | u16 ctl, v; | 1177 | int readrq; |
1178 | int err; | 1178 | u16 v; |
1179 | |||
1180 | err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl); | ||
1181 | if (err) | ||
1182 | return; | ||
1183 | |||
1184 | v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; | ||
1185 | 1179 | ||
1180 | readrq = pcie_get_readrq(rdev->pdev); | ||
1181 | v = ffs(readrq) - 8; | ||
1186 | /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it | 1182 | /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it |
1187 | * to avoid hangs or perfomance issues | 1183 | * to avoid hangs or perfomance issues |
1188 | */ | 1184 | */ |
1189 | if ((v == 0) || (v == 6) || (v == 7)) { | 1185 | if ((v == 0) || (v == 6) || (v == 7)) |
1190 | ctl &= ~PCI_EXP_DEVCTL_READRQ; | 1186 | pcie_set_readrq(rdev->pdev, 512); |
1191 | ctl |= (2 << 12); | ||
1192 | pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl); | ||
1193 | } | ||
1194 | } | 1187 | } |
1195 | 1188 | ||
1196 | static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc) | 1189 | static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc) |
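The radeon hunk above drops the open-coded PCI_EXP_DEVCTL_READRQ manipulation in favor of pcie_get_readrq()/pcie_set_readrq(), which work in bytes. A rough sketch of the encoding that the ffs(readrq) - 8 conversion and the forced 512-byte value rely on; the helper names below are illustrative, not kernel APIs:

	/* MaxReadReq is a 3-bit code: size in bytes = 128 << code, spec maximum 4096 (code 5) */
	static inline int readrq_bytes_to_code(int bytes)
	{
		return ffs(bytes) - 8;	/* 128 -> 0, 256 -> 1, ..., 4096 -> 5 */
	}

	static inline int readrq_code_to_bytes(int code)
	{
		return 128 << code;	/* code 2 -> 512, the value the driver falls back to */
	}

Codes 6 and 7 would exceed the 4096-byte maximum, which is why the driver treats them (and, on this hardware, 0) as invalid and resets the request size to 512 bytes.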
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 3f14009fb662..c8d9c4ab142b 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -51,8 +51,8 @@ | |||
51 | * file calls, even though this violates some | 51 | * file calls, even though this violates some |
52 | * expectations of harmlessness. | 52 | * expectations of harmlessness. |
53 | */ | 53 | */ |
54 | static int qib_tune_pcie_caps(struct qib_devdata *); | 54 | static void qib_tune_pcie_caps(struct qib_devdata *); |
55 | static int qib_tune_pcie_coalesce(struct qib_devdata *); | 55 | static void qib_tune_pcie_coalesce(struct qib_devdata *); |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Do all the common PCIe setup and initialization. | 58 | * Do all the common PCIe setup and initialization. |
@@ -476,30 +476,6 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) | |||
476 | "pci_enable_device failed after reset: %d\n", r); | 476 | "pci_enable_device failed after reset: %d\n", r); |
477 | } | 477 | } |
478 | 478 | ||
479 | /* code to adjust PCIe capabilities. */ | ||
480 | |||
481 | static int fld2val(int wd, int mask) | ||
482 | { | ||
483 | int lsbmask; | ||
484 | |||
485 | if (!mask) | ||
486 | return 0; | ||
487 | wd &= mask; | ||
488 | lsbmask = mask ^ (mask & (mask - 1)); | ||
489 | wd /= lsbmask; | ||
490 | return wd; | ||
491 | } | ||
492 | |||
493 | static int val2fld(int wd, int mask) | ||
494 | { | ||
495 | int lsbmask; | ||
496 | |||
497 | if (!mask) | ||
498 | return 0; | ||
499 | lsbmask = mask ^ (mask & (mask - 1)); | ||
500 | wd *= lsbmask; | ||
501 | return wd; | ||
502 | } | ||
503 | 479 | ||
504 | static int qib_pcie_coalesce; | 480 | static int qib_pcie_coalesce; |
505 | module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO); | 481 | module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO); |
@@ -511,7 +487,7 @@ MODULE_PARM_DESC(pcie_coalesce, "tune PCIe colescing on some Intel chipsets"); | |||
511 | * of these chipsets, with some BIOS settings, and enabling it on those | 487 | * of these chipsets, with some BIOS settings, and enabling it on those |
512 | * systems may result in the system crashing, and/or data corruption. | 488 | * systems may result in the system crashing, and/or data corruption. |
513 | */ | 489 | */ |
514 | static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | 490 | static void qib_tune_pcie_coalesce(struct qib_devdata *dd) |
515 | { | 491 | { |
516 | int r; | 492 | int r; |
517 | struct pci_dev *parent; | 493 | struct pci_dev *parent; |
@@ -519,18 +495,18 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | |||
519 | u32 mask, bits, val; | 495 | u32 mask, bits, val; |
520 | 496 | ||
521 | if (!qib_pcie_coalesce) | 497 | if (!qib_pcie_coalesce) |
522 | return 0; | 498 | return; |
523 | 499 | ||
524 | /* Find out supported and configured values for parent (root) */ | 500 | /* Find out supported and configured values for parent (root) */ |
525 | parent = dd->pcidev->bus->self; | 501 | parent = dd->pcidev->bus->self; |
526 | if (parent->bus->parent) { | 502 | if (parent->bus->parent) { |
527 | qib_devinfo(dd->pcidev, "Parent not root\n"); | 503 | qib_devinfo(dd->pcidev, "Parent not root\n"); |
528 | return 1; | 504 | return; |
529 | } | 505 | } |
530 | if (!pci_is_pcie(parent)) | 506 | if (!pci_is_pcie(parent)) |
531 | return 1; | 507 | return; |
532 | if (parent->vendor != 0x8086) | 508 | if (parent->vendor != 0x8086) |
533 | return 1; | 509 | return; |
534 | 510 | ||
535 | /* | 511 | /* |
536 | * - bit 12: Max_rdcmp_Imt_EN: need to set to 1 | 512 | * - bit 12: Max_rdcmp_Imt_EN: need to set to 1 |
@@ -563,13 +539,12 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | |||
563 | mask = (3U << 24) | (7U << 10); | 539 | mask = (3U << 24) | (7U << 10); |
564 | } else { | 540 | } else { |
565 | /* not one of the chipsets that we know about */ | 541 | /* not one of the chipsets that we know about */ |
566 | return 1; | 542 | return; |
567 | } | 543 | } |
568 | pci_read_config_dword(parent, 0x48, &val); | 544 | pci_read_config_dword(parent, 0x48, &val); |
569 | val &= ~mask; | 545 | val &= ~mask; |
570 | val |= bits; | 546 | val |= bits; |
571 | r = pci_write_config_dword(parent, 0x48, val); | 547 | r = pci_write_config_dword(parent, 0x48, val); |
572 | return 0; | ||
573 | } | 548 | } |
574 | 549 | ||
575 | /* | 550 | /* |
@@ -580,55 +555,44 @@ static int qib_pcie_caps; | |||
580 | module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); | 555 | module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); |
581 | MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); | 556 | MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); |
582 | 557 | ||
583 | static int qib_tune_pcie_caps(struct qib_devdata *dd) | 558 | static void qib_tune_pcie_caps(struct qib_devdata *dd) |
584 | { | 559 | { |
585 | int ret = 1; /* Assume the worst */ | ||
586 | struct pci_dev *parent; | 560 | struct pci_dev *parent; |
587 | u16 pcaps, pctl, ecaps, ectl; | 561 | u16 rc_mpss, rc_mps, ep_mpss, ep_mps; |
588 | int rc_sup, ep_sup; | 562 | u16 rc_mrrs, ep_mrrs, max_mrrs; |
589 | int rc_cur, ep_cur; | ||
590 | 563 | ||
591 | /* Find out supported and configured values for parent (root) */ | 564 | /* Find out supported and configured values for parent (root) */ |
592 | parent = dd->pcidev->bus->self; | 565 | parent = dd->pcidev->bus->self; |
593 | if (parent->bus->parent) { | 566 | if (!pci_is_root_bus(parent->bus)) { |
594 | qib_devinfo(dd->pcidev, "Parent not root\n"); | 567 | qib_devinfo(dd->pcidev, "Parent not root\n"); |
595 | goto bail; | 568 | return; |
596 | } | 569 | } |
597 | 570 | ||
598 | if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) | 571 | if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) |
599 | goto bail; | 572 | return; |
600 | pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps); | 573 | |
601 | pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl); | 574 | rc_mpss = parent->pcie_mpss; |
575 | rc_mps = ffs(pcie_get_mps(parent)) - 8; | ||
602 | /* Find out supported and configured values for endpoint (us) */ | 576 | /* Find out supported and configured values for endpoint (us) */ |
603 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps); | 577 | ep_mpss = dd->pcidev->pcie_mpss; |
604 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); | 578 | ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8; |
605 | 579 | ||
606 | ret = 0; | ||
607 | /* Find max payload supported by root, endpoint */ | 580 | /* Find max payload supported by root, endpoint */ |
608 | rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD); | 581 | if (rc_mpss > ep_mpss) |
609 | ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD); | 582 | rc_mpss = ep_mpss; |
610 | if (rc_sup > ep_sup) | ||
611 | rc_sup = ep_sup; | ||
612 | |||
613 | rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD); | ||
614 | ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD); | ||
615 | 583 | ||
616 | /* If Supported greater than limit in module param, limit it */ | 584 | /* If Supported greater than limit in module param, limit it */ |
617 | if (rc_sup > (qib_pcie_caps & 7)) | 585 | if (rc_mpss > (qib_pcie_caps & 7)) |
618 | rc_sup = qib_pcie_caps & 7; | 586 | rc_mpss = qib_pcie_caps & 7; |
619 | /* If less than (allowed, supported), bump root payload */ | 587 | /* If less than (allowed, supported), bump root payload */ |
620 | if (rc_sup > rc_cur) { | 588 | if (rc_mpss > rc_mps) { |
621 | rc_cur = rc_sup; | 589 | rc_mps = rc_mpss; |
622 | pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | | 590 | pcie_set_mps(parent, 128 << rc_mps); |
623 | val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD); | ||
624 | pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl); | ||
625 | } | 591 | } |
626 | /* If less than (allowed, supported), bump endpoint payload */ | 592 | /* If less than (allowed, supported), bump endpoint payload */ |
627 | if (rc_sup > ep_cur) { | 593 | if (rc_mpss > ep_mps) { |
628 | ep_cur = rc_sup; | 594 | ep_mps = rc_mpss; |
629 | ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) | | 595 | pcie_set_mps(dd->pcidev, 128 << ep_mps); |
630 | val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD); | ||
631 | pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); | ||
632 | } | 596 | } |
633 | 597 | ||
634 | /* | 598 | /* |
@@ -636,26 +600,22 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd) | |||
636 | * No field for max supported, but PCIe spec limits it to 4096, | 600 | * No field for max supported, but PCIe spec limits it to 4096, |
637 | * which is code '5' (log2(4096) - 7) | 601 | * which is code '5' (log2(4096) - 7) |
638 | */ | 602 | */ |
639 | rc_sup = 5; | 603 | max_mrrs = 5; |
640 | if (rc_sup > ((qib_pcie_caps >> 4) & 7)) | 604 | if (max_mrrs > ((qib_pcie_caps >> 4) & 7)) |
641 | rc_sup = (qib_pcie_caps >> 4) & 7; | 605 | max_mrrs = (qib_pcie_caps >> 4) & 7; |
642 | rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ); | 606 | |
643 | ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ); | 607 | max_mrrs = 128 << max_mrrs; |
644 | 608 | rc_mrrs = pcie_get_readrq(parent); | |
645 | if (rc_sup > rc_cur) { | 609 | ep_mrrs = pcie_get_readrq(dd->pcidev); |
646 | rc_cur = rc_sup; | 610 | |
647 | pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) | | 611 | if (max_mrrs > rc_mrrs) { |
648 | val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ); | 612 | rc_mrrs = max_mrrs; |
649 | pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl); | 613 | pcie_set_readrq(parent, rc_mrrs); |
650 | } | 614 | } |
651 | if (rc_sup > ep_cur) { | 615 | if (max_mrrs > ep_mrrs) { |
652 | ep_cur = rc_sup; | 616 | ep_mrrs = max_mrrs; |
653 | ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) | | 617 | pcie_set_readrq(dd->pcidev, ep_mrrs); |
654 | val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ); | ||
655 | pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); | ||
656 | } | 618 | } |
657 | bail: | ||
658 | return ret; | ||
659 | } | 619 | } |
660 | /* End of PCIe capability tuning */ | 620 | /* End of PCIe capability tuning */ |
661 | 621 | ||
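The qib_tune_pcie_caps() rewrite above replaces manual PCI_EXP_DEVCAP/PCI_EXP_DEVCTL field arithmetic with the byte-based pcie_get_mps()/pcie_set_mps() and pcie_get_readrq()/pcie_set_readrq() helpers; dev->pcie_mpss caches the encoded supported payload size (0..5, i.e. 128..4096 bytes). A hypothetical sketch of the same clamping pattern, assuming kernel context (clamp_mps_example is not a real function and omits the module-parameter limit the driver also applies):

	static void clamp_mps_example(struct pci_dev *root, struct pci_dev *ep)
	{
		u8 mpss = min(root->pcie_mpss, ep->pcie_mpss);	/* smallest encoded size both support */
		int bytes = 128 << mpss;			/* encoded value -> bytes */

		if (pcie_get_mps(root) < bytes)
			pcie_set_mps(root, bytes);		/* bump root port payload */
		if (pcie_get_mps(ep) < bytes)
			pcie_set_mps(ep, bytes);		/* bump endpoint payload */
	}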
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 43186feb4294..47d46c6d8468 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -15,8 +15,22 @@ config PCI_EXYNOS | |||
15 | select PCIEPORTBUS | 15 | select PCIEPORTBUS |
16 | select PCIE_DW | 16 | select PCIE_DW |
17 | 17 | ||
18 | config PCI_IMX6 | ||
19 | bool "Freescale i.MX6 PCIe controller" | ||
20 | depends on SOC_IMX6Q | ||
21 | select PCIEPORTBUS | ||
22 | select PCIE_DW | ||
23 | |||
18 | config PCI_TEGRA | 24 | config PCI_TEGRA |
19 | bool "NVIDIA Tegra PCIe controller" | 25 | bool "NVIDIA Tegra PCIe controller" |
20 | depends on ARCH_TEGRA | 26 | depends on ARCH_TEGRA |
21 | 27 | ||
28 | config PCI_RCAR_GEN2 | ||
29 | bool "Renesas R-Car Gen2 Internal PCI controller" | ||
30 | depends on ARM && (ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST) | ||
31 | help | ||
32 | Say Y here if you want internal PCI support on R-Car Gen2 SoC. | ||
33 | There are 3 internal PCI controllers available with a single | ||
34 | built-in EHCI/OHCI host controller present on each one. | ||
35 | |||
22 | endmenu | 36 | endmenu |
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index c9a997b2690d..13fb3333aa05 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,4 +1,6 @@ | |||
1 | obj-$(CONFIG_PCIE_DW) += pcie-designware.o | 1 | obj-$(CONFIG_PCIE_DW) += pcie-designware.o |
2 | obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o | 2 | obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o |
3 | obj-$(CONFIG_PCI_IMX6) += pci-imx6.o | ||
3 | obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o | 4 | obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o |
4 | obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o | 5 | obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o |
6 | obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o | ||
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 94e096bb2d0a..24beed38ddc7 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -48,6 +48,7 @@ struct exynos_pcie { | |||
48 | #define PCIE_IRQ_SPECIAL 0x008 | 48 | #define PCIE_IRQ_SPECIAL 0x008 |
49 | #define PCIE_IRQ_EN_PULSE 0x00c | 49 | #define PCIE_IRQ_EN_PULSE 0x00c |
50 | #define PCIE_IRQ_EN_LEVEL 0x010 | 50 | #define PCIE_IRQ_EN_LEVEL 0x010 |
51 | #define IRQ_MSI_ENABLE (0x1 << 2) | ||
51 | #define PCIE_IRQ_EN_SPECIAL 0x014 | 52 | #define PCIE_IRQ_EN_SPECIAL 0x014 |
52 | #define PCIE_PWR_RESET 0x018 | 53 | #define PCIE_PWR_RESET 0x018 |
53 | #define PCIE_CORE_RESET 0x01c | 54 | #define PCIE_CORE_RESET 0x01c |
@@ -77,18 +78,28 @@ struct exynos_pcie { | |||
77 | #define PCIE_PHY_PLL_BIAS 0x00c | 78 | #define PCIE_PHY_PLL_BIAS 0x00c |
78 | #define PCIE_PHY_DCC_FEEDBACK 0x014 | 79 | #define PCIE_PHY_DCC_FEEDBACK 0x014 |
79 | #define PCIE_PHY_PLL_DIV_1 0x05c | 80 | #define PCIE_PHY_PLL_DIV_1 0x05c |
81 | #define PCIE_PHY_COMMON_POWER 0x064 | ||
82 | #define PCIE_PHY_COMMON_PD_CMN (0x1 << 3) | ||
80 | #define PCIE_PHY_TRSV0_EMP_LVL 0x084 | 83 | #define PCIE_PHY_TRSV0_EMP_LVL 0x084 |
81 | #define PCIE_PHY_TRSV0_DRV_LVL 0x088 | 84 | #define PCIE_PHY_TRSV0_DRV_LVL 0x088 |
82 | #define PCIE_PHY_TRSV0_RXCDR 0x0ac | 85 | #define PCIE_PHY_TRSV0_RXCDR 0x0ac |
86 | #define PCIE_PHY_TRSV0_POWER 0x0c4 | ||
87 | #define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7) | ||
83 | #define PCIE_PHY_TRSV0_LVCC 0x0dc | 88 | #define PCIE_PHY_TRSV0_LVCC 0x0dc |
84 | #define PCIE_PHY_TRSV1_EMP_LVL 0x144 | 89 | #define PCIE_PHY_TRSV1_EMP_LVL 0x144 |
85 | #define PCIE_PHY_TRSV1_RXCDR 0x16c | 90 | #define PCIE_PHY_TRSV1_RXCDR 0x16c |
91 | #define PCIE_PHY_TRSV1_POWER 0x184 | ||
92 | #define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7) | ||
86 | #define PCIE_PHY_TRSV1_LVCC 0x19c | 93 | #define PCIE_PHY_TRSV1_LVCC 0x19c |
87 | #define PCIE_PHY_TRSV2_EMP_LVL 0x204 | 94 | #define PCIE_PHY_TRSV2_EMP_LVL 0x204 |
88 | #define PCIE_PHY_TRSV2_RXCDR 0x22c | 95 | #define PCIE_PHY_TRSV2_RXCDR 0x22c |
96 | #define PCIE_PHY_TRSV2_POWER 0x244 | ||
97 | #define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7) | ||
89 | #define PCIE_PHY_TRSV2_LVCC 0x25c | 98 | #define PCIE_PHY_TRSV2_LVCC 0x25c |
90 | #define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 | 99 | #define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 |
91 | #define PCIE_PHY_TRSV3_RXCDR 0x2ec | 100 | #define PCIE_PHY_TRSV3_RXCDR 0x2ec |
101 | #define PCIE_PHY_TRSV3_POWER 0x304 | ||
102 | #define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7) | ||
92 | #define PCIE_PHY_TRSV3_LVCC 0x31c | 103 | #define PCIE_PHY_TRSV3_LVCC 0x31c |
93 | 104 | ||
94 | static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg) | 105 | static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg) |
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) | |||
202 | exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET); | 213 | exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET); |
203 | } | 214 | } |
204 | 215 | ||
216 | static void exynos_pcie_power_on_phy(struct pcie_port *pp) | ||
217 | { | ||
218 | u32 val; | ||
219 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
220 | |||
221 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER); | ||
222 | val &= ~PCIE_PHY_COMMON_PD_CMN; | ||
223 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER); | ||
224 | |||
225 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER); | ||
226 | val &= ~PCIE_PHY_TRSV0_PD_TSV; | ||
227 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER); | ||
228 | |||
229 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER); | ||
230 | val &= ~PCIE_PHY_TRSV1_PD_TSV; | ||
231 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER); | ||
232 | |||
233 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER); | ||
234 | val &= ~PCIE_PHY_TRSV2_PD_TSV; | ||
235 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER); | ||
236 | |||
237 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER); | ||
238 | val &= ~PCIE_PHY_TRSV3_PD_TSV; | ||
239 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER); | ||
240 | } | ||
241 | |||
242 | static void exynos_pcie_power_off_phy(struct pcie_port *pp) | ||
243 | { | ||
244 | u32 val; | ||
245 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
246 | |||
247 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER); | ||
248 | val |= PCIE_PHY_COMMON_PD_CMN; | ||
249 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER); | ||
250 | |||
251 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER); | ||
252 | val |= PCIE_PHY_TRSV0_PD_TSV; | ||
253 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER); | ||
254 | |||
255 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER); | ||
256 | val |= PCIE_PHY_TRSV1_PD_TSV; | ||
257 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER); | ||
258 | |||
259 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER); | ||
260 | val |= PCIE_PHY_TRSV2_PD_TSV; | ||
261 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER); | ||
262 | |||
263 | val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER); | ||
264 | val |= PCIE_PHY_TRSV3_PD_TSV; | ||
265 | exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER); | ||
266 | } | ||
267 | |||
205 | static void exynos_pcie_init_phy(struct pcie_port *pp) | 268 | static void exynos_pcie_init_phy(struct pcie_port *pp) |
206 | { | 269 | { |
207 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | 270 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); |
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp) | |||
270 | /* de-assert phy reset */ | 333 | /* de-assert phy reset */ |
271 | exynos_pcie_deassert_phy_reset(pp); | 334 | exynos_pcie_deassert_phy_reset(pp); |
272 | 335 | ||
336 | /* power on phy */ | ||
337 | exynos_pcie_power_on_phy(pp); | ||
338 | |||
273 | /* initialize phy */ | 339 | /* initialize phy */ |
274 | exynos_pcie_init_phy(pp); | 340 | exynos_pcie_init_phy(pp); |
275 | 341 | ||
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp) | |||
302 | PCIE_PHY_PLL_LOCKED); | 368 | PCIE_PHY_PLL_LOCKED); |
303 | dev_info(pp->dev, "PLL Locked: 0x%x\n", val); | 369 | dev_info(pp->dev, "PLL Locked: 0x%x\n", val); |
304 | } | 370 | } |
371 | /* power off phy */ | ||
372 | exynos_pcie_power_off_phy(pp); | ||
373 | |||
305 | dev_err(pp->dev, "PCIe Link Fail\n"); | 374 | dev_err(pp->dev, "PCIe Link Fail\n"); |
306 | return -EINVAL; | 375 | return -EINVAL; |
307 | } | 376 | } |
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) | |||
342 | return IRQ_HANDLED; | 411 | return IRQ_HANDLED; |
343 | } | 412 | } |
344 | 413 | ||
414 | static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg) | ||
415 | { | ||
416 | struct pcie_port *pp = arg; | ||
417 | |||
418 | dw_handle_msi_irq(pp); | ||
419 | |||
420 | return IRQ_HANDLED; | ||
421 | } | ||
422 | |||
423 | static void exynos_pcie_msi_init(struct pcie_port *pp) | ||
424 | { | ||
425 | u32 val; | ||
426 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
427 | |||
428 | dw_pcie_msi_init(pp); | ||
429 | |||
430 | /* enable MSI interrupt */ | ||
431 | val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL); | ||
432 | val |= IRQ_MSI_ENABLE; | ||
433 | exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL); | ||
434 | return; | ||
435 | } | ||
436 | |||
345 | static void exynos_pcie_enable_interrupts(struct pcie_port *pp) | 437 | static void exynos_pcie_enable_interrupts(struct pcie_port *pp) |
346 | { | 438 | { |
347 | exynos_pcie_enable_irq_pulse(pp); | 439 | exynos_pcie_enable_irq_pulse(pp); |
440 | |||
441 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
442 | exynos_pcie_msi_init(pp); | ||
443 | |||
348 | return; | 444 | return; |
349 | } | 445 | } |
350 | 446 | ||
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) | |||
430 | return ret; | 526 | return ret; |
431 | } | 527 | } |
432 | 528 | ||
529 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
530 | pp->msi_irq = platform_get_irq(pdev, 0); | ||
531 | if (!pp->msi_irq) { | ||
532 | dev_err(&pdev->dev, "failed to get msi irq\n"); | ||
533 | return -ENODEV; | ||
534 | } | ||
535 | |||
536 | ret = devm_request_irq(&pdev->dev, pp->msi_irq, | ||
537 | exynos_pcie_msi_irq_handler, | ||
538 | IRQF_SHARED, "exynos-pcie", pp); | ||
539 | if (ret) { | ||
540 | dev_err(&pdev->dev, "failed to request msi irq\n"); | ||
541 | return ret; | ||
542 | } | ||
543 | } | ||
544 | |||
433 | pp->root_bus_nr = -1; | 545 | pp->root_bus_nr = -1; |
434 | pp->ops = &exynos_pcie_host_ops; | 546 | pp->ops = &exynos_pcie_host_ops; |
435 | 547 | ||
@@ -487,18 +599,24 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) | |||
487 | 599 | ||
488 | elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 600 | elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
489 | exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); | 601 | exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); |
490 | if (IS_ERR(exynos_pcie->elbi_base)) | 602 | if (IS_ERR(exynos_pcie->elbi_base)) { |
491 | return PTR_ERR(exynos_pcie->elbi_base); | 603 | ret = PTR_ERR(exynos_pcie->elbi_base); |
604 | goto fail_bus_clk; | ||
605 | } | ||
492 | 606 | ||
493 | phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 607 | phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
494 | exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); | 608 | exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); |
495 | if (IS_ERR(exynos_pcie->phy_base)) | 609 | if (IS_ERR(exynos_pcie->phy_base)) { |
496 | return PTR_ERR(exynos_pcie->phy_base); | 610 | ret = PTR_ERR(exynos_pcie->phy_base); |
611 | goto fail_bus_clk; | ||
612 | } | ||
497 | 613 | ||
498 | block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); | 614 | block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); |
499 | exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base); | 615 | exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base); |
500 | if (IS_ERR(exynos_pcie->block_base)) | 616 | if (IS_ERR(exynos_pcie->block_base)) { |
501 | return PTR_ERR(exynos_pcie->block_base); | 617 | ret = PTR_ERR(exynos_pcie->block_base); |
618 | goto fail_bus_clk; | ||
619 | } | ||
502 | 620 | ||
503 | ret = add_pcie_port(pp, pdev); | 621 | ret = add_pcie_port(pp, pdev); |
504 | if (ret < 0) | 622 | if (ret < 0) |
@@ -535,7 +653,7 @@ static struct platform_driver exynos_pcie_driver = { | |||
535 | .driver = { | 653 | .driver = { |
536 | .name = "exynos-pcie", | 654 | .name = "exynos-pcie", |
537 | .owner = THIS_MODULE, | 655 | .owner = THIS_MODULE, |
538 | .of_match_table = of_match_ptr(exynos_pcie_of_match), | 656 | .of_match_table = exynos_pcie_of_match, |
539 | }, | 657 | }, |
540 | }; | 658 | }; |
541 | 659 | ||
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 000000000000..bd70af8f31ac
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,568 @@ | |||
1 | /* | ||
2 | * PCIe host controller driver for Freescale i.MX6 SoCs | ||
3 | * | ||
4 | * Copyright (C) 2013 Kosagi | ||
5 | * http://www.kosagi.com | ||
6 | * | ||
7 | * Author: Sean Cross <xobs@kosagi.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/gpio.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mfd/syscon.h> | ||
19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/of_gpio.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/regmap.h> | ||
25 | #include <linux/resource.h> | ||
26 | #include <linux/signal.h> | ||
27 | #include <linux/types.h> | ||
28 | |||
29 | #include "pcie-designware.h" | ||
30 | |||
31 | #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) | ||
32 | |||
33 | struct imx6_pcie { | ||
34 | int reset_gpio; | ||
35 | int power_on_gpio; | ||
36 | int wake_up_gpio; | ||
37 | int disable_gpio; | ||
38 | struct clk *lvds_gate; | ||
39 | struct clk *sata_ref_100m; | ||
40 | struct clk *pcie_ref_125m; | ||
41 | struct clk *pcie_axi; | ||
42 | struct pcie_port pp; | ||
43 | struct regmap *iomuxc_gpr; | ||
44 | void __iomem *mem_base; | ||
45 | }; | ||
46 | |||
47 | /* PCIe Port Logic registers (memory-mapped) */ | ||
48 | #define PL_OFFSET 0x700 | ||
49 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) | ||
50 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) | ||
51 | |||
52 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) | ||
53 | #define PCIE_PHY_CTRL_DATA_LOC 0 | ||
54 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 | ||
55 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 | ||
56 | #define PCIE_PHY_CTRL_WR_LOC 18 | ||
57 | #define PCIE_PHY_CTRL_RD_LOC 19 | ||
58 | |||
59 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) | ||
60 | #define PCIE_PHY_STAT_ACK_LOC 16 | ||
61 | |||
62 | /* PHY registers (not memory-mapped) */ | ||
63 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | ||
64 | |||
65 | #define PHY_RX_OVRD_IN_LO 0x1005 | ||
66 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) | ||
67 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) | ||
68 | |||
69 | static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) | ||
70 | { | ||
71 | u32 val; | ||
72 | u32 max_iterations = 10; | ||
73 | u32 wait_counter = 0; | ||
74 | |||
75 | do { | ||
76 | val = readl(dbi_base + PCIE_PHY_STAT); | ||
77 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; | ||
78 | wait_counter++; | ||
79 | |||
80 | if (val == exp_val) | ||
81 | return 0; | ||
82 | |||
83 | udelay(1); | ||
84 | } while (wait_counter < max_iterations); | ||
85 | |||
86 | return -ETIMEDOUT; | ||
87 | } | ||
88 | |||
89 | static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) | ||
90 | { | ||
91 | u32 val; | ||
92 | int ret; | ||
93 | |||
94 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | ||
95 | writel(val, dbi_base + PCIE_PHY_CTRL); | ||
96 | |||
97 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); | ||
98 | writel(val, dbi_base + PCIE_PHY_CTRL); | ||
99 | |||
100 | ret = pcie_phy_poll_ack(dbi_base, 1); | ||
101 | if (ret) | ||
102 | return ret; | ||
103 | |||
104 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | ||
105 | writel(val, dbi_base + PCIE_PHY_CTRL); | ||
106 | |||
107 | ret = pcie_phy_poll_ack(dbi_base, 0); | ||
108 | if (ret) | ||
109 | return ret; | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ | ||
115 | static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) | ||
116 | { | ||
117 | u32 val, phy_ctl; | ||
118 | int ret; | ||
119 | |||
120 | ret = pcie_phy_wait_ack(dbi_base, addr); | ||
121 | if (ret) | ||
122 | return ret; | ||
123 | |||
124 | /* assert Read signal */ | ||
125 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; | ||
126 | writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); | ||
127 | |||
128 | ret = pcie_phy_poll_ack(dbi_base, 1); | ||
129 | if (ret) | ||
130 | return ret; | ||
131 | |||
132 | val = readl(dbi_base + PCIE_PHY_STAT); | ||
133 | *data = val & 0xffff; | ||
134 | |||
135 | /* deassert Read signal */ | ||
136 | writel(0x00, dbi_base + PCIE_PHY_CTRL); | ||
137 | |||
138 | ret = pcie_phy_poll_ack(dbi_base, 0); | ||
139 | if (ret) | ||
140 | return ret; | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int pcie_phy_write(void __iomem *dbi_base, int addr, int data) | ||
146 | { | ||
147 | u32 var; | ||
148 | int ret; | ||
149 | |||
150 | /* write addr */ | ||
151 | /* cap addr */ | ||
152 | ret = pcie_phy_wait_ack(dbi_base, addr); | ||
153 | if (ret) | ||
154 | return ret; | ||
155 | |||
156 | var = data << PCIE_PHY_CTRL_DATA_LOC; | ||
157 | writel(var, dbi_base + PCIE_PHY_CTRL); | ||
158 | |||
159 | /* capture data */ | ||
160 | var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); | ||
161 | writel(var, dbi_base + PCIE_PHY_CTRL); | ||
162 | |||
163 | ret = pcie_phy_poll_ack(dbi_base, 1); | ||
164 | if (ret) | ||
165 | return ret; | ||
166 | |||
167 | /* deassert cap data */ | ||
168 | var = data << PCIE_PHY_CTRL_DATA_LOC; | ||
169 | writel(var, dbi_base + PCIE_PHY_CTRL); | ||
170 | |||
171 | /* wait for ack de-assertion */ | ||
172 | ret = pcie_phy_poll_ack(dbi_base, 0); | ||
173 | if (ret) | ||
174 | return ret; | ||
175 | |||
176 | /* assert wr signal */ | ||
177 | var = 0x1 << PCIE_PHY_CTRL_WR_LOC; | ||
178 | writel(var, dbi_base + PCIE_PHY_CTRL); | ||
179 | |||
180 | /* wait for ack */ | ||
181 | ret = pcie_phy_poll_ack(dbi_base, 1); | ||
182 | if (ret) | ||
183 | return ret; | ||
184 | |||
185 | /* deassert wr signal */ | ||
186 | var = data << PCIE_PHY_CTRL_DATA_LOC; | ||
187 | writel(var, dbi_base + PCIE_PHY_CTRL); | ||
188 | |||
189 | /* wait for ack de-assertion */ | ||
190 | ret = pcie_phy_poll_ack(dbi_base, 0); | ||
191 | if (ret) | ||
192 | return ret; | ||
193 | |||
194 | writel(0x0, dbi_base + PCIE_PHY_CTRL); | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | /* Added for PCI abort handling */ | ||
200 | static int imx6q_pcie_abort_handler(unsigned long addr, | ||
201 | unsigned int fsr, struct pt_regs *regs) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int imx6_pcie_assert_core_reset(struct pcie_port *pp) | ||
207 | { | ||
208 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | ||
209 | |||
210 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
211 | IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); | ||
212 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
213 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | ||
214 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
215 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); | ||
216 | |||
217 | gpio_set_value(imx6_pcie->reset_gpio, 0); | ||
218 | msleep(100); | ||
219 | gpio_set_value(imx6_pcie->reset_gpio, 1); | ||
220 | |||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) | ||
225 | { | ||
226 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | ||
227 | int ret; | ||
228 | |||
229 | if (gpio_is_valid(imx6_pcie->power_on_gpio)) | ||
230 | gpio_set_value(imx6_pcie->power_on_gpio, 1); | ||
231 | |||
232 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
233 | IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); | ||
234 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
235 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); | ||
236 | |||
237 | ret = clk_prepare_enable(imx6_pcie->sata_ref_100m); | ||
238 | if (ret) { | ||
239 | dev_err(pp->dev, "unable to enable sata_ref_100m\n"); | ||
240 | goto err_sata_ref; | ||
241 | } | ||
242 | |||
243 | ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m); | ||
244 | if (ret) { | ||
245 | dev_err(pp->dev, "unable to enable pcie_ref_125m\n"); | ||
246 | goto err_pcie_ref; | ||
247 | } | ||
248 | |||
249 | ret = clk_prepare_enable(imx6_pcie->lvds_gate); | ||
250 | if (ret) { | ||
251 | dev_err(pp->dev, "unable to enable lvds_gate\n"); | ||
252 | goto err_lvds_gate; | ||
253 | } | ||
254 | |||
255 | ret = clk_prepare_enable(imx6_pcie->pcie_axi); | ||
256 | if (ret) { | ||
257 | dev_err(pp->dev, "unable to enable pcie_axi\n"); | ||
258 | goto err_pcie_axi; | ||
259 | } | ||
260 | |||
261 | /* allow the clocks to stabilize */ | ||
262 | usleep_range(200, 500); | ||
263 | |||
264 | return 0; | ||
265 | |||
266 | err_pcie_axi: | ||
267 | clk_disable_unprepare(imx6_pcie->lvds_gate); | ||
268 | err_lvds_gate: | ||
269 | clk_disable_unprepare(imx6_pcie->pcie_ref_125m); | ||
270 | err_pcie_ref: | ||
271 | clk_disable_unprepare(imx6_pcie->sata_ref_100m); | ||
272 | err_sata_ref: | ||
273 | return ret; | ||
274 | |||
275 | } | ||
276 | |||
277 | static void imx6_pcie_init_phy(struct pcie_port *pp) | ||
278 | { | ||
279 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | ||
280 | |||
281 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
282 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | ||
283 | |||
284 | /* configure constant input signal to the pcie ctrl and phy */ | ||
285 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
286 | IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); | ||
287 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
288 | IMX6Q_GPR12_LOS_LEVEL, 9 << 4); | ||
289 | |||
290 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
291 | IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0); | ||
292 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
293 | IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6); | ||
294 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
295 | IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12); | ||
296 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
297 | IMX6Q_GPR8_TX_SWING_FULL, 127 << 18); | ||
298 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
299 | IMX6Q_GPR8_TX_SWING_LOW, 127 << 25); | ||
300 | } | ||
301 | |||
302 | static void imx6_pcie_host_init(struct pcie_port *pp) | ||
303 | { | ||
304 | int count = 0; | ||
305 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | ||
306 | |||
307 | imx6_pcie_assert_core_reset(pp); | ||
308 | |||
309 | imx6_pcie_init_phy(pp); | ||
310 | |||
311 | imx6_pcie_deassert_core_reset(pp); | ||
312 | |||
313 | dw_pcie_setup_rc(pp); | ||
314 | |||
315 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
316 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | ||
317 | |||
318 | while (!dw_pcie_link_up(pp)) { | ||
319 | usleep_range(100, 1000); | ||
320 | count++; | ||
321 | if (count >= 200) { | ||
322 | dev_err(pp->dev, "phy link never came up\n"); | ||
323 | dev_dbg(pp->dev, | ||
324 | "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | ||
325 | readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), | ||
326 | readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); | ||
327 | break; | ||
328 | } | ||
329 | } | ||
330 | |||
331 | return; | ||
332 | } | ||
333 | |||
334 | static int imx6_pcie_link_up(struct pcie_port *pp) | ||
335 | { | ||
336 | u32 rc, ltssm, rx_valid, temp; | ||
337 | |||
338 | /* link is debug bit 36, debug register 1 starts at bit 32 */ | ||
339 | rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32)); | ||
340 | if (rc) | ||
341 | return -EAGAIN; | ||
342 | |||
343 | /* | ||
344 | * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. | ||
345 | * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2). | ||
346 | * If (MAC/LTSSM.state == Recovery.RcvrLock) | ||
347 | * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition | ||
348 | * to gen2 is stuck | ||
349 | */ | ||
350 | pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid); | ||
351 | ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F; | ||
352 | |||
353 | if (rx_valid & 0x01) | ||
354 | return 0; | ||
355 | |||
356 | if (ltssm != 0x0d) | ||
357 | return 0; | ||
358 | |||
359 | dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n"); | ||
360 | |||
361 | pcie_phy_read(pp->dbi_base, | ||
362 | PHY_RX_OVRD_IN_LO, &temp); | ||
363 | temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | ||
364 | | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
365 | pcie_phy_write(pp->dbi_base, | ||
366 | PHY_RX_OVRD_IN_LO, temp); | ||
367 | |||
368 | usleep_range(2000, 3000); | ||
369 | |||
370 | pcie_phy_read(pp->dbi_base, | ||
371 | PHY_RX_OVRD_IN_LO, &temp); | ||
372 | temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | ||
373 | | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
374 | pcie_phy_write(pp->dbi_base, | ||
375 | PHY_RX_OVRD_IN_LO, temp); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static struct pcie_host_ops imx6_pcie_host_ops = { | ||
381 | .link_up = imx6_pcie_link_up, | ||
382 | .host_init = imx6_pcie_host_init, | ||
383 | }; | ||
384 | |||
385 | static int imx6_add_pcie_port(struct pcie_port *pp, | ||
386 | struct platform_device *pdev) | ||
387 | { | ||
388 | int ret; | ||
389 | |||
390 | pp->irq = platform_get_irq(pdev, 0); | ||
391 | if (pp->irq <= 0) { | ||
392 | dev_err(&pdev->dev, "failed to get irq\n"); | ||
393 | return -ENODEV; | ||
394 | } | ||
395 | |||
396 | pp->root_bus_nr = -1; | ||
397 | pp->ops = &imx6_pcie_host_ops; | ||
398 | |||
399 | spin_lock_init(&pp->conf_lock); | ||
400 | ret = dw_pcie_host_init(pp); | ||
401 | if (ret) { | ||
402 | dev_err(&pdev->dev, "failed to initialize host\n"); | ||
403 | return ret; | ||
404 | } | ||
405 | |||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | static int __init imx6_pcie_probe(struct platform_device *pdev) | ||
410 | { | ||
411 | struct imx6_pcie *imx6_pcie; | ||
412 | struct pcie_port *pp; | ||
413 | struct device_node *np = pdev->dev.of_node; | ||
414 | struct resource *dbi_base; | ||
415 | int ret; | ||
416 | |||
417 | imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL); | ||
418 | if (!imx6_pcie) | ||
419 | return -ENOMEM; | ||
420 | |||
421 | pp = &imx6_pcie->pp; | ||
422 | pp->dev = &pdev->dev; | ||
423 | |||
424 | /* Added for PCI abort handling */ | ||
425 | hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, | ||
426 | "imprecise external abort"); | ||
427 | |||
428 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
429 | if (!dbi_base) { | ||
430 | dev_err(&pdev->dev, "dbi_base memory resource not found\n"); | ||
431 | return -ENODEV; | ||
432 | } | ||
433 | |||
434 | pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); | ||
435 | if (IS_ERR(pp->dbi_base)) { | ||
436 | ret = PTR_ERR(pp->dbi_base); | ||
437 | goto err; | ||
438 | } | ||
439 | |||
440 | /* Fetch GPIOs */ | ||
441 | imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); | ||
442 | if (!gpio_is_valid(imx6_pcie->reset_gpio)) { | ||
443 | dev_err(&pdev->dev, "no reset-gpio defined\n"); | ||
444 | return -ENODEV; | ||
445 | } | ||
446 | ret = devm_gpio_request_one(&pdev->dev, | ||
447 | imx6_pcie->reset_gpio, | ||
448 | GPIOF_OUT_INIT_LOW, | ||
449 | "PCIe reset"); | ||
450 | if (ret) { | ||
451 | dev_err(&pdev->dev, "unable to get reset gpio\n"); | ||
452 | goto err; | ||
453 | } | ||
454 | |||
455 | imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); | ||
456 | if (gpio_is_valid(imx6_pcie->power_on_gpio)) { | ||
457 | ret = devm_gpio_request_one(&pdev->dev, | ||
458 | imx6_pcie->power_on_gpio, | ||
459 | GPIOF_OUT_INIT_LOW, | ||
460 | "PCIe power enable"); | ||
461 | if (ret) { | ||
462 | dev_err(&pdev->dev, "unable to get power-on gpio\n"); | ||
463 | goto err; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0); | ||
468 | if (gpio_is_valid(imx6_pcie->wake_up_gpio)) { | ||
469 | ret = devm_gpio_request_one(&pdev->dev, | ||
470 | imx6_pcie->wake_up_gpio, | ||
471 | GPIOF_IN, | ||
472 | "PCIe wake up"); | ||
473 | if (ret) { | ||
474 | dev_err(&pdev->dev, "unable to get wake-up gpio\n"); | ||
475 | goto err; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0); | ||
480 | if (gpio_is_valid(imx6_pcie->disable_gpio)) { | ||
481 | ret = devm_gpio_request_one(&pdev->dev, | ||
482 | imx6_pcie->disable_gpio, | ||
483 | GPIOF_OUT_INIT_HIGH, | ||
484 | "PCIe disable endpoint"); | ||
485 | if (ret) { | ||
486 | dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); | ||
487 | goto err; | ||
488 | } | ||
489 | } | ||
490 | |||
491 | /* Fetch clocks */ | ||
492 | imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate"); | ||
493 | if (IS_ERR(imx6_pcie->lvds_gate)) { | ||
494 | dev_err(&pdev->dev, | ||
495 | "lvds_gate clock select missing or invalid\n"); | ||
496 | ret = PTR_ERR(imx6_pcie->lvds_gate); | ||
497 | goto err; | ||
498 | } | ||
499 | |||
500 | imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m"); | ||
501 | if (IS_ERR(imx6_pcie->sata_ref_100m)) { | ||
502 | dev_err(&pdev->dev, | ||
503 | "sata_ref_100m clock source missing or invalid\n"); | ||
504 | ret = PTR_ERR(imx6_pcie->sata_ref_100m); | ||
505 | goto err; | ||
506 | } | ||
507 | |||
508 | imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); | ||
509 | if (IS_ERR(imx6_pcie->pcie_ref_125m)) { | ||
510 | dev_err(&pdev->dev, | ||
511 | "pcie_ref_125m clock source missing or invalid\n"); | ||
512 | ret = PTR_ERR(imx6_pcie->pcie_ref_125m); | ||
513 | goto err; | ||
514 | } | ||
515 | |||
516 | imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); | ||
517 | if (IS_ERR(imx6_pcie->pcie_axi)) { | ||
518 | dev_err(&pdev->dev, | ||
519 | "pcie_axi clock source missing or invalid\n"); | ||
520 | ret = PTR_ERR(imx6_pcie->pcie_axi); | ||
521 | goto err; | ||
522 | } | ||
523 | |||
524 | /* Grab GPR config register range */ | ||
525 | imx6_pcie->iomuxc_gpr = | ||
526 | syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | ||
527 | if (IS_ERR(imx6_pcie->iomuxc_gpr)) { | ||
528 | dev_err(&pdev->dev, "unable to find iomuxc registers\n"); | ||
529 | ret = PTR_ERR(imx6_pcie->iomuxc_gpr); | ||
530 | goto err; | ||
531 | } | ||
532 | |||
533 | ret = imx6_add_pcie_port(pp, pdev); | ||
534 | if (ret < 0) | ||
535 | goto err; | ||
536 | |||
537 | platform_set_drvdata(pdev, imx6_pcie); | ||
538 | return 0; | ||
539 | |||
540 | err: | ||
541 | return ret; | ||
542 | } | ||
543 | |||
544 | static const struct of_device_id imx6_pcie_of_match[] = { | ||
545 | { .compatible = "fsl,imx6q-pcie", }, | ||
546 | {}, | ||
547 | }; | ||
548 | MODULE_DEVICE_TABLE(of, imx6_pcie_of_match); | ||
549 | |||
550 | static struct platform_driver imx6_pcie_driver = { | ||
551 | .driver = { | ||
552 | .name = "imx6q-pcie", | ||
553 | .owner = THIS_MODULE, | ||
554 | .of_match_table = imx6_pcie_of_match, | ||
555 | }, | ||
556 | }; | ||
557 | |||
558 | /* Freescale PCIe driver does not allow module unload */ | ||
559 | |||
560 | static int __init imx6_pcie_init(void) | ||
561 | { | ||
562 | return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe); | ||
563 | } | ||
564 | fs_initcall(imx6_pcie_init); | ||
565 | |||
566 | MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>"); | ||
567 | MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver"); | ||
568 | MODULE_LICENSE("GPL v2"); | ||
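A note on the GPR programming near the top of this file: regmap_update_bits() takes a field mask and a value already shifted into the field's position, which is why the driver writes e.g. PCI_EXP_TYPE_ROOT_PORT << 12 against IMX6Q_GPR12_DEVICE_TYPE. The stand-alone sketch below models that read-modify-write; the mask widths are assumptions inferred from the shift amounts used above, not values taken from the i.MX6 GPR header.

/*
 * Stand-alone sketch (not kernel code) of the read-modify-write that
 * regmap_update_bits() performs for the GPR12 field writes shown near
 * the top of this hunk.  The mask widths are assumptions inferred from
 * the shifts used by the driver.
 */
#include <stdio.h>
#include <stdint.h>

#define GPR12_DEVICE_TYPE_MASK	(0xf << 12)	/* assumed field width */
#define GPR12_LOS_LEVEL_MASK	(0x1f << 4)	/* assumed field width */
#define PCI_EXP_TYPE_ROOT_PORT	0x4		/* PCIe port type: root port */

static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t gpr12 = 0;

	/* Mirrors the two IOMUXC_GPR12 updates shown above. */
	gpr12 = update_bits(gpr12, GPR12_DEVICE_TYPE_MASK,
			    PCI_EXP_TYPE_ROOT_PORT << 12);
	gpr12 = update_bits(gpr12, GPR12_LOS_LEVEL_MASK, 9 << 4);

	printf("GPR12 = 0x%08x\n", (unsigned int)gpr12);	/* 0x00004090 */
	return 0;
}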
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c new file mode 100644 index 000000000000..cbaa5c4397e3 --- /dev/null +++ b/drivers/pci/host/pci-rcar-gen2.c | |||
@@ -0,0 +1,333 @@ | |||
1 | /* | ||
2 | * pci-rcar-gen2: internal PCI bus support | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
5 | * Copyright (C) 2013 Cogent Embedded, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/delay.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/slab.h> | ||
21 | |||
22 | /* AHB-PCI Bridge PCI communication registers */ | ||
23 | #define RCAR_AHBPCI_PCICOM_OFFSET 0x800 | ||
24 | |||
25 | #define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00) | ||
26 | #define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04) | ||
27 | #define RCAR_PCIAHB_PREFETCH0 0x0 | ||
28 | #define RCAR_PCIAHB_PREFETCH4 0x1 | ||
29 | #define RCAR_PCIAHB_PREFETCH8 0x2 | ||
30 | #define RCAR_PCIAHB_PREFETCH16 0x3 | ||
31 | |||
32 | #define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10) | ||
33 | #define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14) | ||
34 | #define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1) | ||
35 | #define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1) | ||
36 | #define RCAR_AHBPCI_WIN1_HOST (1 << 30) | ||
37 | #define RCAR_AHBPCI_WIN1_DEVICE (1 << 31) | ||
38 | |||
39 | #define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20) | ||
40 | #define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24) | ||
41 | #define RCAR_PCI_INT_A (1 << 16) | ||
42 | #define RCAR_PCI_INT_B (1 << 17) | ||
43 | #define RCAR_PCI_INT_PME (1 << 19) | ||
44 | |||
45 | #define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30) | ||
46 | #define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0) | ||
47 | #define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1) | ||
48 | #define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2) | ||
49 | #define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7) | ||
50 | #define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17) | ||
51 | #define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \ | ||
52 | RCAR_AHB_BUS_MMODE_BYTE_BURST | \ | ||
53 | RCAR_AHB_BUS_MMODE_WR_INCR | \ | ||
54 | RCAR_AHB_BUS_MMODE_HBUS_REQ | \ | ||
55 | RCAR_AHB_BUS_SMODE_READYCTR) | ||
56 | |||
57 | #define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34) | ||
58 | #define RCAR_USBCTR_USBH_RST (1 << 0) | ||
59 | #define RCAR_USBCTR_PCICLK_MASK (1 << 1) | ||
60 | #define RCAR_USBCTR_PLL_RST (1 << 2) | ||
61 | #define RCAR_USBCTR_DIRPD (1 << 8) | ||
62 | #define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9) | ||
63 | #define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10) | ||
64 | #define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10) | ||
65 | #define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10) | ||
66 | #define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10) | ||
67 | #define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10) | ||
68 | |||
69 | #define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40) | ||
70 | #define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0) | ||
71 | #define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1) | ||
72 | #define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12) | ||
73 | |||
74 | #define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) | ||
75 | |||
76 | /* Number of internal PCI controllers */ | ||
77 | #define RCAR_PCI_NR_CONTROLLERS 3 | ||
78 | |||
79 | struct rcar_pci_priv { | ||
80 | void __iomem *reg; | ||
81 | struct resource io_res; | ||
82 | struct resource mem_res; | ||
83 | struct resource *cfg_res; | ||
84 | int irq; | ||
85 | }; | ||
86 | |||
87 | /* PCI configuration space operations */ | ||
88 | static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, | ||
89 | int where) | ||
90 | { | ||
91 | struct pci_sys_data *sys = bus->sysdata; | ||
92 | struct rcar_pci_priv *priv = sys->private_data; | ||
93 | int slot, val; | ||
94 | |||
95 | if (sys->busnr != bus->number || PCI_FUNC(devfn)) | ||
96 | return NULL; | ||
97 | |||
98 | /* Only one EHCI/OHCI device built-in */ | ||
99 | slot = PCI_SLOT(devfn); | ||
100 | if (slot > 2) | ||
101 | return NULL; | ||
102 | |||
103 | val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG : | ||
104 | RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG; | ||
105 | |||
106 | iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG); | ||
107 | return priv->reg + (slot >> 1) * 0x100 + where; | ||
108 | } | ||
109 | |||
110 | static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn, | ||
111 | int where, int size, u32 *val) | ||
112 | { | ||
113 | void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where); | ||
114 | |||
115 | if (!reg) | ||
116 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
117 | |||
118 | switch (size) { | ||
119 | case 1: | ||
120 | *val = ioread8(reg); | ||
121 | break; | ||
122 | case 2: | ||
123 | *val = ioread16(reg); | ||
124 | break; | ||
125 | default: | ||
126 | *val = ioread32(reg); | ||
127 | break; | ||
128 | } | ||
129 | |||
130 | return PCIBIOS_SUCCESSFUL; | ||
131 | } | ||
132 | |||
133 | static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn, | ||
134 | int where, int size, u32 val) | ||
135 | { | ||
136 | void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where); | ||
137 | |||
138 | if (!reg) | ||
139 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
140 | |||
141 | switch (size) { | ||
142 | case 1: | ||
143 | iowrite8(val, reg); | ||
144 | break; | ||
145 | case 2: | ||
146 | iowrite16(val, reg); | ||
147 | break; | ||
148 | default: | ||
149 | iowrite32(val, reg); | ||
150 | break; | ||
151 | } | ||
152 | |||
153 | return PCIBIOS_SUCCESSFUL; | ||
154 | } | ||
155 | |||
156 | /* PCI interrupt mapping */ | ||
157 | static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
158 | { | ||
159 | struct pci_sys_data *sys = dev->bus->sysdata; | ||
160 | struct rcar_pci_priv *priv = sys->private_data; | ||
161 | |||
162 | return priv->irq; | ||
163 | } | ||
164 | |||
165 | /* PCI host controller setup */ | ||
166 | static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys) | ||
167 | { | ||
168 | struct rcar_pci_priv *priv = sys->private_data; | ||
169 | void __iomem *reg = priv->reg; | ||
170 | u32 val; | ||
171 | |||
172 | val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); | ||
173 | pr_info("PCI: bus%u revision %x\n", sys->busnr, val); | ||
174 | |||
175 | /* Disable Direct Power Down State and assert reset */ | ||
176 | val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; | ||
177 | val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST; | ||
178 | iowrite32(val, reg + RCAR_USBCTR_REG); | ||
179 | udelay(4); | ||
180 | |||
181 | /* De-assert reset and set PCIAHB window1 size to 1GB */ | ||
182 | val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK | | ||
183 | RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); | ||
184 | iowrite32(val | RCAR_USBCTR_PCIAHB_WIN1_1G, reg + RCAR_USBCTR_REG); | ||
185 | |||
186 | /* Configure AHB master and slave modes */ | ||
187 | iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG); | ||
188 | |||
189 | /* Configure PCI arbiter */ | ||
190 | val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG); | ||
191 | val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 | | ||
192 | RCAR_PCI_ARBITER_PCIBP_MODE; | ||
193 | iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); | ||
194 | |||
195 | /* PCI-AHB mapping: 0x40000000-0x80000000 */ | ||
196 | iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16, | ||
197 | reg + RCAR_PCIAHB_WIN1_CTR_REG); | ||
198 | |||
199 | /* AHB-PCI mapping: OHCI/EHCI registers */ | ||
200 | val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM; | ||
201 | iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG); | ||
202 | |||
203 | /* Enable AHB-PCI bridge PCI configuration access */ | ||
204 | iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, | ||
205 | reg + RCAR_AHBPCI_WIN1_CTR_REG); | ||
206 | /* Set PCI-AHB Window1 address */ | ||
207 | iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH, | ||
208 | reg + PCI_BASE_ADDRESS_1); | ||
209 | /* Set AHB-PCI bridge PCI communication area address */ | ||
210 | val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; | ||
211 | iowrite32(val, reg + PCI_BASE_ADDRESS_0); | ||
212 | |||
213 | val = ioread32(reg + PCI_COMMAND); | ||
214 | val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | | ||
215 | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
216 | iowrite32(val, reg + PCI_COMMAND); | ||
217 | |||
218 | /* Enable PCI interrupts */ | ||
219 | iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, | ||
220 | reg + RCAR_PCI_INT_ENABLE_REG); | ||
221 | |||
222 | /* Add PCI resources */ | ||
223 | pci_add_resource(&sys->resources, &priv->io_res); | ||
224 | pci_add_resource(&sys->resources, &priv->mem_res); | ||
225 | |||
226 | return 1; | ||
227 | } | ||
228 | |||
229 | static struct pci_ops rcar_pci_ops = { | ||
230 | .read = rcar_pci_read_config, | ||
231 | .write = rcar_pci_write_config, | ||
232 | }; | ||
233 | |||
234 | static struct hw_pci rcar_hw_pci __initdata = { | ||
235 | .map_irq = rcar_pci_map_irq, | ||
236 | .ops = &rcar_pci_ops, | ||
237 | .setup = rcar_pci_setup, | ||
238 | }; | ||
239 | |||
240 | static int rcar_pci_count __initdata; | ||
241 | |||
242 | static int __init rcar_pci_add_controller(struct rcar_pci_priv *priv) | ||
243 | { | ||
244 | void **private_data; | ||
245 | int count; | ||
246 | |||
247 | if (rcar_hw_pci.nr_controllers < rcar_pci_count) | ||
248 | goto add_priv; | ||
249 | |||
250 | /* (Re)allocate private data pointer array if needed */ | ||
251 | count = rcar_pci_count + RCAR_PCI_NR_CONTROLLERS; | ||
252 | private_data = kzalloc(count * sizeof(void *), GFP_KERNEL); | ||
253 | if (!private_data) | ||
254 | return -ENOMEM; | ||
255 | |||
256 | rcar_pci_count = count; | ||
257 | if (rcar_hw_pci.private_data) { | ||
258 | memcpy(private_data, rcar_hw_pci.private_data, | ||
259 | rcar_hw_pci.nr_controllers * sizeof(void *)); | ||
260 | kfree(rcar_hw_pci.private_data); | ||
261 | } | ||
262 | |||
263 | rcar_hw_pci.private_data = private_data; | ||
264 | |||
265 | add_priv: | ||
266 | /* Add private data pointer to the array */ | ||
267 | rcar_hw_pci.private_data[rcar_hw_pci.nr_controllers++] = priv; | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | static int __init rcar_pci_probe(struct platform_device *pdev) | ||
272 | { | ||
273 | struct resource *cfg_res, *mem_res; | ||
274 | struct rcar_pci_priv *priv; | ||
275 | void __iomem *reg; | ||
276 | |||
277 | cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
278 | reg = devm_ioremap_resource(&pdev->dev, cfg_res); | ||
279 | if (IS_ERR(reg)) | ||
280 | return PTR_ERR(reg); | ||
281 | |||
282 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
283 | if (!mem_res || !mem_res->start) | ||
284 | return -ENODEV; | ||
285 | |||
286 | priv = devm_kzalloc(&pdev->dev, | ||
287 | sizeof(struct rcar_pci_priv), GFP_KERNEL); | ||
288 | if (!priv) | ||
289 | return -ENOMEM; | ||
290 | |||
291 | priv->mem_res = *mem_res; | ||
292 | /* | ||
293 | * The controller does not support/use port I/O, | ||
294 | * so set up a dummy port I/O region here. | ||
295 | */ | ||
296 | priv->io_res.start = priv->mem_res.start; | ||
297 | priv->io_res.end = priv->mem_res.end; | ||
298 | priv->io_res.flags = IORESOURCE_IO; | ||
299 | |||
300 | priv->cfg_res = cfg_res; | ||
301 | |||
302 | priv->irq = platform_get_irq(pdev, 0); | ||
303 | priv->reg = reg; | ||
304 | |||
305 | return rcar_pci_add_controller(priv); | ||
306 | } | ||
307 | |||
308 | static struct platform_driver rcar_pci_driver = { | ||
309 | .driver = { | ||
310 | .name = "pci-rcar-gen2", | ||
311 | }, | ||
312 | }; | ||
313 | |||
314 | static int __init rcar_pci_init(void) | ||
315 | { | ||
316 | int retval; | ||
317 | |||
318 | retval = platform_driver_probe(&rcar_pci_driver, rcar_pci_probe); | ||
319 | if (!retval) | ||
320 | pci_common_init(&rcar_hw_pci); | ||
321 | |||
322 | /* Private data pointer array is not needed any more */ | ||
323 | kfree(rcar_hw_pci.private_data); | ||
324 | rcar_hw_pci.private_data = NULL; | ||
325 | |||
326 | return retval; | ||
327 | } | ||
328 | |||
329 | subsys_initcall(rcar_pci_init); | ||
330 | |||
331 | MODULE_LICENSE("GPL v2"); | ||
332 | MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI"); | ||
333 | MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>"); | ||
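For context on the registration scheme in pci-rcar-gen2.c above: rcar_pci_add_controller() grows a flat array of private-data pointers in chunks of RCAR_PCI_NR_CONTROLLERS, and rcar_pci_init() hands the filled array to pci_common_init() once all instances have probed. The sketch below is a minimal userspace model of that array growth; the names (ctrl_add, ctrl_array, CHUNK) are illustrative only.

/*
 * Minimal userspace model of the controller array growth above.
 * Not kernel code; names are stand-ins for the driver's globals.
 */
#include <stdlib.h>
#include <string.h>

#define CHUNK 3	/* mirrors RCAR_PCI_NR_CONTROLLERS */

static void **ctrl_array;	/* stands in for rcar_hw_pci.private_data */
static int ctrl_alloc;		/* slots allocated (rcar_pci_count)       */
static int ctrl_used;		/* slots in use (nr_controllers)          */

static int ctrl_add(void *priv)
{
	if (ctrl_used >= ctrl_alloc) {
		/* Grow the pointer array by one chunk and copy the old entries. */
		void **tmp = calloc(ctrl_alloc + CHUNK, sizeof(*tmp));

		if (!tmp)
			return -1;
		if (ctrl_array) {
			memcpy(tmp, ctrl_array, ctrl_used * sizeof(*tmp));
			free(ctrl_array);
		}
		ctrl_array = tmp;
		ctrl_alloc += CHUNK;
	}
	ctrl_array[ctrl_used++] = priv;
	return 0;
}

int main(void)
{
	int dummy;

	return ctrl_add(&dummy);
}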
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 2e9888a0635a..7c4f38dd42ba 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
@@ -408,7 +408,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, | |||
408 | 408 | ||
409 | list_for_each_entry(bus, &pcie->busses, list) | 409 | list_for_each_entry(bus, &pcie->busses, list) |
410 | if (bus->nr == busnr) | 410 | if (bus->nr == busnr) |
411 | return bus->area->addr; | 411 | return (void __iomem *)bus->area->addr; |
412 | 412 | ||
413 | bus = tegra_pcie_bus_alloc(pcie, busnr); | 413 | bus = tegra_pcie_bus_alloc(pcie, busnr); |
414 | if (IS_ERR(bus)) | 414 | if (IS_ERR(bus)) |
@@ -416,7 +416,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, | |||
416 | 416 | ||
417 | list_add_tail(&bus->list, &pcie->busses); | 417 | list_add_tail(&bus->list, &pcie->busses); |
418 | 418 | ||
419 | return bus->area->addr; | 419 | return (void __iomem *)bus->area->addr; |
420 | } | 420 | } |
421 | 421 | ||
422 | static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus, | 422 | static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus, |
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index c10e9ac9bbbc..1e1fea4d959b 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c | |||
@@ -11,8 +11,11 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/irqdomain.h> | ||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/msi.h> | ||
16 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
17 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
18 | #include <linux/pci_regs.h> | 21 | #include <linux/pci_regs.h> |
@@ -64,7 +67,7 @@ | |||
64 | 67 | ||
65 | static struct hw_pci dw_pci; | 68 | static struct hw_pci dw_pci; |
66 | 69 | ||
67 | unsigned long global_io_offset; | 70 | static unsigned long global_io_offset; |
68 | 71 | ||
69 | static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) | 72 | static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) |
70 | { | 73 | { |
@@ -115,8 +118,8 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg) | |||
115 | writel(val, pp->dbi_base + reg); | 118 | writel(val, pp->dbi_base + reg); |
116 | } | 119 | } |
117 | 120 | ||
118 | int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | 121 | static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, |
119 | u32 *val) | 122 | u32 *val) |
120 | { | 123 | { |
121 | int ret; | 124 | int ret; |
122 | 125 | ||
@@ -128,8 +131,8 @@ int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | |||
128 | return ret; | 131 | return ret; |
129 | } | 132 | } |
130 | 133 | ||
131 | int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | 134 | static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, |
132 | u32 val) | 135 | u32 val) |
133 | { | 136 | { |
134 | int ret; | 137 | int ret; |
135 | 138 | ||
@@ -142,6 +145,205 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | |||
142 | return ret; | 145 | return ret; |
143 | } | 146 | } |
144 | 147 | ||
148 | static struct irq_chip dw_msi_irq_chip = { | ||
149 | .name = "PCI-MSI", | ||
150 | .irq_enable = unmask_msi_irq, | ||
151 | .irq_disable = mask_msi_irq, | ||
152 | .irq_mask = mask_msi_irq, | ||
153 | .irq_unmask = unmask_msi_irq, | ||
154 | }; | ||
155 | |||
156 | /* MSI int handler */ | ||
157 | void dw_handle_msi_irq(struct pcie_port *pp) | ||
158 | { | ||
159 | unsigned long val; | ||
160 | int i, pos, irq; | ||
161 | |||
162 | for (i = 0; i < MAX_MSI_CTRLS; i++) { | ||
163 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, | ||
164 | (u32 *)&val); | ||
165 | if (val) { | ||
166 | pos = 0; | ||
167 | while ((pos = find_next_bit(&val, 32, pos)) != 32) { | ||
168 | irq = irq_find_mapping(pp->irq_domain, | ||
169 | i * 32 + pos); | ||
170 | generic_handle_irq(irq); | ||
171 | pos++; | ||
172 | } | ||
173 | } | ||
174 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | void dw_pcie_msi_init(struct pcie_port *pp) | ||
179 | { | ||
180 | pp->msi_data = __get_free_pages(GFP_KERNEL, 0); | ||
181 | |||
182 | /* program the msi_data */ | ||
183 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, | ||
184 | virt_to_phys((void *)pp->msi_data)); | ||
185 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0); | ||
186 | } | ||
187 | |||
188 | static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0) | ||
189 | { | ||
190 | int flag = 1; | ||
191 | |||
192 | do { | ||
193 | pos = find_next_zero_bit(pp->msi_irq_in_use, | ||
194 | MAX_MSI_IRQS, pos); | ||
195 | /* If we have reached the end, there are no free vectors left. */ | ||
196 | if (pos == MAX_MSI_IRQS) | ||
197 | return -ENOSPC; | ||
198 | /* | ||
199 | * Check if this position is at the correct offset. nvec is always | ||
200 | * a power of two, so pos0 must be aligned to a multiple of nvec. | ||
201 | */ | ||
202 | if (pos % msgvec) | ||
203 | pos += msgvec - (pos % msgvec); | ||
204 | else | ||
205 | flag = 0; | ||
206 | } while (flag); | ||
207 | |||
208 | *pos0 = pos; | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) | ||
213 | { | ||
214 | int res, bit, irq, pos0, pos1, i; | ||
215 | u32 val; | ||
216 | struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); | ||
217 | |||
218 | if (!pp) { | ||
219 | BUG(); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
223 | pos0 = find_first_zero_bit(pp->msi_irq_in_use, | ||
224 | MAX_MSI_IRQS); | ||
225 | if (pos0 % no_irqs) { | ||
226 | if (find_valid_pos0(pp, no_irqs, pos0, &pos0)) | ||
227 | goto no_valid_irq; | ||
228 | } | ||
229 | if (no_irqs > 1) { | ||
230 | pos1 = find_next_bit(pp->msi_irq_in_use, | ||
231 | MAX_MSI_IRQS, pos0); | ||
232 | /* there must be nvec number of consecutive free bits */ | ||
233 | while ((pos1 - pos0) < no_irqs) { | ||
234 | if (find_valid_pos0(pp, no_irqs, pos1, &pos0)) | ||
235 | goto no_valid_irq; | ||
236 | pos1 = find_next_bit(pp->msi_irq_in_use, | ||
237 | MAX_MSI_IRQS, pos0); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | irq = irq_find_mapping(pp->irq_domain, pos0); | ||
242 | if (!irq) | ||
243 | goto no_valid_irq; | ||
244 | |||
245 | i = 0; | ||
246 | while (i < no_irqs) { | ||
247 | set_bit(pos0 + i, pp->msi_irq_in_use); | ||
248 | irq_alloc_descs((irq + i), (irq + i), 1, 0); | ||
249 | irq_set_msi_desc(irq + i, desc); | ||
250 | /* Enable corresponding interrupt in MSI interrupt controller */ | ||
251 | res = ((pos0 + i) / 32) * 12; | ||
252 | bit = (pos0 + i) % 32; | ||
253 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | ||
254 | val |= 1 << bit; | ||
255 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | ||
256 | i++; | ||
257 | } | ||
258 | |||
259 | *pos = pos0; | ||
260 | return irq; | ||
261 | |||
262 | no_valid_irq: | ||
263 | *pos = pos0; | ||
264 | return -ENOSPC; | ||
265 | } | ||
266 | |||
267 | static void clear_irq(unsigned int irq) | ||
268 | { | ||
269 | int res, bit, val, pos; | ||
270 | struct irq_desc *desc; | ||
271 | struct msi_desc *msi; | ||
272 | struct pcie_port *pp; | ||
273 | struct irq_data *data = irq_get_irq_data(irq); | ||
274 | |||
275 | /* get the port structure */ | ||
276 | desc = irq_to_desc(irq); | ||
277 | msi = irq_desc_get_msi_desc(desc); | ||
278 | pp = sys_to_pcie(msi->dev->bus->sysdata); | ||
279 | if (!pp) { | ||
280 | BUG(); | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | pos = data->hwirq; | ||
285 | |||
286 | irq_free_desc(irq); | ||
287 | |||
288 | clear_bit(pos, pp->msi_irq_in_use); | ||
289 | |||
290 | /* Disable corresponding interrupt on MSI interrupt controller */ | ||
291 | res = (pos / 32) * 12; | ||
292 | bit = pos % 32; | ||
293 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); | ||
294 | val &= ~(1 << bit); | ||
295 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); | ||
296 | } | ||
297 | |||
298 | static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, | ||
299 | struct msi_desc *desc) | ||
300 | { | ||
301 | int irq, pos, msgvec; | ||
302 | u16 msg_ctr; | ||
303 | struct msi_msg msg; | ||
304 | struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); | ||
305 | |||
306 | if (!pp) { | ||
307 | BUG(); | ||
308 | return -EINVAL; | ||
309 | } | ||
310 | |||
311 | pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, | ||
312 | &msg_ctr); | ||
313 | msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4; | ||
314 | if (msgvec == 0) | ||
315 | msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1; | ||
316 | if (msgvec > 5) | ||
317 | msgvec = 0; | ||
318 | |||
319 | irq = assign_irq((1 << msgvec), desc, &pos); | ||
320 | if (irq < 0) | ||
321 | return irq; | ||
322 | |||
323 | msg_ctr &= ~PCI_MSI_FLAGS_QSIZE; | ||
324 | msg_ctr |= msgvec << 4; | ||
325 | pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, | ||
326 | msg_ctr); | ||
327 | desc->msi_attrib.multiple = msgvec; | ||
328 | |||
329 | msg.address_lo = virt_to_phys((void *)pp->msi_data); | ||
330 | msg.address_hi = 0x0; | ||
331 | msg.data = pos; | ||
332 | write_msi_msg(irq, &msg); | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) | ||
338 | { | ||
339 | clear_irq(irq); | ||
340 | } | ||
341 | |||
342 | static struct msi_chip dw_pcie_msi_chip = { | ||
343 | .setup_irq = dw_msi_setup_irq, | ||
344 | .teardown_irq = dw_msi_teardown_irq, | ||
345 | }; | ||
346 | |||
145 | int dw_pcie_link_up(struct pcie_port *pp) | 347 | int dw_pcie_link_up(struct pcie_port *pp) |
146 | { | 348 | { |
147 | if (pp->ops->link_up) | 349 | if (pp->ops->link_up) |
@@ -150,12 +352,27 @@ int dw_pcie_link_up(struct pcie_port *pp) | |||
150 | return 0; | 352 | return 0; |
151 | } | 353 | } |
152 | 354 | ||
355 | static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, | ||
356 | irq_hw_number_t hwirq) | ||
357 | { | ||
358 | irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq); | ||
359 | irq_set_chip_data(irq, domain->host_data); | ||
360 | set_irq_flags(irq, IRQF_VALID); | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static const struct irq_domain_ops msi_domain_ops = { | ||
366 | .map = dw_pcie_msi_map, | ||
367 | }; | ||
368 | |||
153 | int __init dw_pcie_host_init(struct pcie_port *pp) | 369 | int __init dw_pcie_host_init(struct pcie_port *pp) |
154 | { | 370 | { |
155 | struct device_node *np = pp->dev->of_node; | 371 | struct device_node *np = pp->dev->of_node; |
156 | struct of_pci_range range; | 372 | struct of_pci_range range; |
157 | struct of_pci_range_parser parser; | 373 | struct of_pci_range_parser parser; |
158 | u32 val; | 374 | u32 val; |
375 | int i; | ||
159 | 376 | ||
160 | if (of_pci_range_parser_init(&parser, np)) { | 377 | if (of_pci_range_parser_init(&parser, np)) { |
161 | dev_err(pp->dev, "missing ranges property\n"); | 378 | dev_err(pp->dev, "missing ranges property\n"); |
@@ -223,6 +440,19 @@ int __init dw_pcie_host_init(struct pcie_port *pp) | |||
223 | return -EINVAL; | 440 | return -EINVAL; |
224 | } | 441 | } |
225 | 442 | ||
443 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
444 | pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, | ||
445 | MAX_MSI_IRQS, &msi_domain_ops, | ||
446 | &dw_pcie_msi_chip); | ||
447 | if (!pp->irq_domain) { | ||
448 | dev_err(pp->dev, "irq domain init failed\n"); | ||
449 | return -ENXIO; | ||
450 | } | ||
451 | |||
452 | for (i = 0; i < MAX_MSI_IRQS; i++) | ||
453 | irq_create_mapping(pp->irq_domain, i); | ||
454 | } | ||
455 | |||
226 | if (pp->ops->host_init) | 456 | if (pp->ops->host_init) |
227 | pp->ops->host_init(pp); | 457 | pp->ops->host_init(pp); |
228 | 458 | ||
@@ -438,7 +668,7 @@ static struct pci_ops dw_pcie_ops = { | |||
438 | .write = dw_pcie_wr_conf, | 668 | .write = dw_pcie_wr_conf, |
439 | }; | 669 | }; |
440 | 670 | ||
441 | int dw_pcie_setup(int nr, struct pci_sys_data *sys) | 671 | static int dw_pcie_setup(int nr, struct pci_sys_data *sys) |
442 | { | 672 | { |
443 | struct pcie_port *pp; | 673 | struct pcie_port *pp; |
444 | 674 | ||
@@ -461,7 +691,7 @@ int dw_pcie_setup(int nr, struct pci_sys_data *sys) | |||
461 | return 1; | 691 | return 1; |
462 | } | 692 | } |
463 | 693 | ||
464 | struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) | 694 | static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) |
465 | { | 695 | { |
466 | struct pci_bus *bus; | 696 | struct pci_bus *bus; |
467 | struct pcie_port *pp = sys_to_pcie(sys); | 697 | struct pcie_port *pp = sys_to_pcie(sys); |
@@ -478,17 +708,28 @@ struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) | |||
478 | return bus; | 708 | return bus; |
479 | } | 709 | } |
480 | 710 | ||
481 | int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 711 | static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
482 | { | 712 | { |
483 | struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); | 713 | struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); |
484 | 714 | ||
485 | return pp->irq; | 715 | return pp->irq; |
486 | } | 716 | } |
487 | 717 | ||
718 | static void dw_pcie_add_bus(struct pci_bus *bus) | ||
719 | { | ||
720 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
721 | struct pcie_port *pp = sys_to_pcie(bus->sysdata); | ||
722 | |||
723 | dw_pcie_msi_chip.dev = pp->dev; | ||
724 | bus->msi = &dw_pcie_msi_chip; | ||
725 | } | ||
726 | } | ||
727 | |||
488 | static struct hw_pci dw_pci = { | 728 | static struct hw_pci dw_pci = { |
489 | .setup = dw_pcie_setup, | 729 | .setup = dw_pcie_setup, |
490 | .scan = dw_pcie_scan_bus, | 730 | .scan = dw_pcie_scan_bus, |
491 | .map_irq = dw_pcie_map_irq, | 731 | .map_irq = dw_pcie_map_irq, |
732 | .add_bus = dw_pcie_add_bus, | ||
492 | }; | 733 | }; |
493 | 734 | ||
494 | void dw_pcie_setup_rc(struct pcie_port *pp) | 735 | void dw_pcie_setup_rc(struct pcie_port *pp) |
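The MSI bookkeeping added to pcie-designware.c above repeatedly uses the expressions (pos / 32) * 12 and pos % 32: each MSI controller serves 32 vectors, and its per-controller ENABLE/MASK/STATUS registers are laid out 12 bytes apart. The stand-alone sketch below illustrates that arithmetic; the 0x828 base offset is an assumption made for illustration, not a value taken from this patch.

/*
 * Stand-alone illustration of the per-vector register arithmetic used
 * by assign_irq()/clear_irq() above.  The base offset is assumed.
 */
#include <stdio.h>

#define PCIE_MSI_INTR0_ENABLE	0x828	/* assumed register offset */

int main(void)
{
	int pos;

	for (pos = 0; pos < 64; pos += 31) {
		int res = (pos / 32) * 12;	/* stride between controllers */
		int bit = pos % 32;		/* bit within the controller  */

		printf("vector %2d -> reg 0x%03x, bit %d\n",
		       pos, PCIE_MSI_INTR0_ENABLE + res, bit);
	}
	return 0;
}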
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h index 133820f1da97..c15379be2372 100644 --- a/drivers/pci/host/pcie-designware.h +++ b/drivers/pci/host/pcie-designware.h | |||
@@ -11,6 +11,9 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #ifndef _PCIE_DESIGNWARE_H | ||
15 | #define _PCIE_DESIGNWARE_H | ||
16 | |||
14 | struct pcie_port_info { | 17 | struct pcie_port_info { |
15 | u32 cfg0_size; | 18 | u32 cfg0_size; |
16 | u32 cfg1_size; | 19 | u32 cfg1_size; |
@@ -20,6 +23,14 @@ struct pcie_port_info { | |||
20 | phys_addr_t mem_bus_addr; | 23 | phys_addr_t mem_bus_addr; |
21 | }; | 24 | }; |
22 | 25 | ||
26 | /* | ||
27 | * A controller can support up to 256 MSI IRQs, but keep it at 32 | ||
28 | * for now since we are unlikely to ever need more. If more are | ||
29 | * needed, increase this in multiples of 32. | ||
30 | */ | ||
31 | #define MAX_MSI_IRQS 32 | ||
32 | #define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32) | ||
33 | |||
23 | struct pcie_port { | 34 | struct pcie_port { |
24 | struct device *dev; | 35 | struct device *dev; |
25 | u8 root_bus_nr; | 36 | u8 root_bus_nr; |
@@ -38,6 +49,10 @@ struct pcie_port { | |||
38 | int irq; | 49 | int irq; |
39 | u32 lanes; | 50 | u32 lanes; |
40 | struct pcie_host_ops *ops; | 51 | struct pcie_host_ops *ops; |
52 | int msi_irq; | ||
53 | struct irq_domain *irq_domain; | ||
54 | unsigned long msi_data; | ||
55 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); | ||
41 | }; | 56 | }; |
42 | 57 | ||
43 | struct pcie_host_ops { | 58 | struct pcie_host_ops { |
@@ -51,15 +66,12 @@ struct pcie_host_ops { | |||
51 | void (*host_init)(struct pcie_port *pp); | 66 | void (*host_init)(struct pcie_port *pp); |
52 | }; | 67 | }; |
53 | 68 | ||
54 | extern unsigned long global_io_offset; | ||
55 | |||
56 | int cfg_read(void __iomem *addr, int where, int size, u32 *val); | 69 | int cfg_read(void __iomem *addr, int where, int size, u32 *val); |
57 | int cfg_write(void __iomem *addr, int where, int size, u32 val); | 70 | int cfg_write(void __iomem *addr, int where, int size, u32 val); |
58 | int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val); | 71 | void dw_handle_msi_irq(struct pcie_port *pp); |
59 | int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val); | 72 | void dw_pcie_msi_init(struct pcie_port *pp); |
60 | int dw_pcie_link_up(struct pcie_port *pp); | 73 | int dw_pcie_link_up(struct pcie_port *pp); |
61 | void dw_pcie_setup_rc(struct pcie_port *pp); | 74 | void dw_pcie_setup_rc(struct pcie_port *pp); |
62 | int dw_pcie_host_init(struct pcie_port *pp); | 75 | int dw_pcie_host_init(struct pcie_port *pp); |
63 | int dw_pcie_setup(int nr, struct pci_sys_data *sys); | 76 | |
64 | struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys); | 77 | #endif /* _PCIE_DESIGNWARE_H */ |
65 | int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); | ||
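Relatedly, find_valid_pos0() in pcie-designware.c above enforces that a request for nvec vectors (nvec a power of two, bounded by MAX_MSI_IRQS) starts at a bitmap position that is a multiple of nvec, since multi-message MSI requires naturally aligned vector numbers. A minimal stand-alone sketch of that rounding rule:

/* Stand-alone sketch of the alignment rule enforced by find_valid_pos0(). */
#include <stdio.h>

static int align_pos(int pos, int nvec)
{
	/* Round pos up to the next multiple of nvec, as the driver does. */
	if (pos % nvec)
		pos += nvec - (pos % nvec);
	return pos;
}

int main(void)
{
	printf("%d\n", align_pos(5, 4));	/* -> 8  */
	printf("%d\n", align_pos(8, 4));	/* -> 8  */
	printf("%d\n", align_pos(17, 16));	/* -> 32 */
	return 0;
}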
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 5440131cd4ee..1ce8ee054f1a 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -338,7 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
338 | acpi_handle chandle, handle; | 338 | acpi_handle chandle, handle; |
339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 339 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
340 | 340 | ||
341 | flags &= OSC_SHPC_NATIVE_HP_CONTROL; | 341 | flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL; |
342 | if (!flags) { | 342 | if (!flags) { |
343 | err("Invalid flags %u specified!\n", flags); | 343 | err("Invalid flags %u specified!\n", flags); |
344 | return -EINVAL; | 344 | return -EINVAL; |
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index f4e028924667..26100f510b10 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h | |||
@@ -39,16 +39,6 @@ | |||
39 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
40 | #include <linux/pci_hotplug.h> | 40 | #include <linux/pci_hotplug.h> |
41 | 41 | ||
42 | #define dbg(format, arg...) \ | ||
43 | do { \ | ||
44 | if (acpiphp_debug) \ | ||
45 | printk(KERN_DEBUG "%s: " format, \ | ||
46 | MY_NAME , ## arg); \ | ||
47 | } while (0) | ||
48 | #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) | ||
49 | #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) | ||
50 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) | ||
51 | |||
52 | struct acpiphp_context; | 42 | struct acpiphp_context; |
53 | struct acpiphp_bridge; | 43 | struct acpiphp_bridge; |
54 | struct acpiphp_slot; | 44 | struct acpiphp_slot; |
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index bf2203ef1308..8650d39db392 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
@@ -31,6 +31,8 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define pr_fmt(fmt) "acpiphp: " fmt | ||
35 | |||
34 | #include <linux/init.h> | 36 | #include <linux/init.h> |
35 | #include <linux/module.h> | 37 | #include <linux/module.h> |
36 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
@@ -43,12 +45,9 @@ | |||
43 | #include <linux/smp.h> | 45 | #include <linux/smp.h> |
44 | #include "acpiphp.h" | 46 | #include "acpiphp.h" |
45 | 47 | ||
46 | #define MY_NAME "acpiphp" | ||
47 | |||
48 | /* name size which is used for entries in pcihpfs */ | 48 | /* name size which is used for entries in pcihpfs */ |
49 | #define SLOT_NAME_SIZE 21 /* {_SUN} */ | 49 | #define SLOT_NAME_SIZE 21 /* {_SUN} */ |
50 | 50 | ||
51 | bool acpiphp_debug; | ||
52 | bool acpiphp_disabled; | 51 | bool acpiphp_disabled; |
53 | 52 | ||
54 | /* local variables */ | 53 | /* local variables */ |
@@ -61,9 +60,7 @@ static struct acpiphp_attention_info *attention_info; | |||
61 | MODULE_AUTHOR(DRIVER_AUTHOR); | 60 | MODULE_AUTHOR(DRIVER_AUTHOR); |
62 | MODULE_DESCRIPTION(DRIVER_DESC); | 61 | MODULE_DESCRIPTION(DRIVER_DESC); |
63 | MODULE_LICENSE("GPL"); | 62 | MODULE_LICENSE("GPL"); |
64 | MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); | ||
65 | MODULE_PARM_DESC(disable, "disable acpiphp driver"); | 63 | MODULE_PARM_DESC(disable, "disable acpiphp driver"); |
66 | module_param_named(debug, acpiphp_debug, bool, 0644); | ||
67 | module_param_named(disable, acpiphp_disabled, bool, 0444); | 64 | module_param_named(disable, acpiphp_disabled, bool, 0444); |
68 | 65 | ||
69 | /* export the attention callback registration methods */ | 66 | /* export the attention callback registration methods */ |
@@ -139,7 +136,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
139 | { | 136 | { |
140 | struct slot *slot = hotplug_slot->private; | 137 | struct slot *slot = hotplug_slot->private; |
141 | 138 | ||
142 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 139 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
143 | 140 | ||
144 | /* enable the specified slot */ | 141 | /* enable the specified slot */ |
145 | return acpiphp_enable_slot(slot->acpi_slot); | 142 | return acpiphp_enable_slot(slot->acpi_slot); |
@@ -156,7 +153,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
156 | { | 153 | { |
157 | struct slot *slot = hotplug_slot->private; | 154 | struct slot *slot = hotplug_slot->private; |
158 | 155 | ||
159 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 156 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
160 | 157 | ||
161 | /* disable the specified slot */ | 158 | /* disable the specified slot */ |
162 | return acpiphp_disable_and_eject_slot(slot->acpi_slot); | 159 | return acpiphp_disable_and_eject_slot(slot->acpi_slot); |
@@ -176,8 +173,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
176 | { | 173 | { |
177 | int retval = -ENODEV; | 174 | int retval = -ENODEV; |
178 | 175 | ||
179 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); | 176 | pr_debug("%s - physical_slot = %s\n", __func__, |
180 | 177 | hotplug_slot_name(hotplug_slot)); | |
178 | |||
181 | if (attention_info && try_module_get(attention_info->owner)) { | 179 | if (attention_info && try_module_get(attention_info->owner)) { |
182 | retval = attention_info->set_attn(hotplug_slot, status); | 180 | retval = attention_info->set_attn(hotplug_slot, status); |
183 | module_put(attention_info->owner); | 181 | module_put(attention_info->owner); |
@@ -199,7 +197,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
199 | { | 197 | { |
200 | struct slot *slot = hotplug_slot->private; | 198 | struct slot *slot = hotplug_slot->private; |
201 | 199 | ||
202 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 200 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
203 | 201 | ||
204 | *value = acpiphp_get_power_status(slot->acpi_slot); | 202 | *value = acpiphp_get_power_status(slot->acpi_slot); |
205 | 203 | ||
@@ -221,7 +219,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
221 | { | 219 | { |
222 | int retval = -EINVAL; | 220 | int retval = -EINVAL; |
223 | 221 | ||
224 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); | 222 | pr_debug("%s - physical_slot = %s\n", __func__, |
223 | hotplug_slot_name(hotplug_slot)); | ||
225 | 224 | ||
226 | if (attention_info && try_module_get(attention_info->owner)) { | 225 | if (attention_info && try_module_get(attention_info->owner)) { |
227 | retval = attention_info->get_attn(hotplug_slot, value); | 226 | retval = attention_info->get_attn(hotplug_slot, value); |
@@ -244,7 +243,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
244 | { | 243 | { |
245 | struct slot *slot = hotplug_slot->private; | 244 | struct slot *slot = hotplug_slot->private; |
246 | 245 | ||
247 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 246 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
248 | 247 | ||
249 | *value = acpiphp_get_latch_status(slot->acpi_slot); | 248 | *value = acpiphp_get_latch_status(slot->acpi_slot); |
250 | 249 | ||
@@ -264,7 +263,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
264 | { | 263 | { |
265 | struct slot *slot = hotplug_slot->private; | 264 | struct slot *slot = hotplug_slot->private; |
266 | 265 | ||
267 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 266 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
268 | 267 | ||
269 | *value = acpiphp_get_adapter_status(slot->acpi_slot); | 268 | *value = acpiphp_get_adapter_status(slot->acpi_slot); |
270 | 269 | ||
@@ -279,7 +278,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot) | |||
279 | { | 278 | { |
280 | struct slot *slot = hotplug_slot->private; | 279 | struct slot *slot = hotplug_slot->private; |
281 | 280 | ||
282 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | 281 | pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); |
283 | 282 | ||
284 | kfree(slot->hotplug_slot); | 283 | kfree(slot->hotplug_slot); |
285 | kfree(slot); | 284 | kfree(slot); |
@@ -322,11 +321,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot, | |||
322 | if (retval == -EBUSY) | 321 | if (retval == -EBUSY) |
323 | goto error_hpslot; | 322 | goto error_hpslot; |
324 | if (retval) { | 323 | if (retval) { |
325 | err("pci_hp_register failed with error %d\n", retval); | 324 | pr_err("pci_hp_register failed with error %d\n", retval); |
326 | goto error_hpslot; | 325 | goto error_hpslot; |
327 | } | 326 | } |
328 | 327 | ||
329 | info("Slot [%s] registered\n", slot_name(slot)); | 328 | pr_info("Slot [%s] registered\n", slot_name(slot)); |
330 | 329 | ||
331 | return 0; | 330 | return 0; |
332 | error_hpslot: | 331 | error_hpslot: |
@@ -343,17 +342,17 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot) | |||
343 | struct slot *slot = acpiphp_slot->slot; | 342 | struct slot *slot = acpiphp_slot->slot; |
344 | int retval = 0; | 343 | int retval = 0; |
345 | 344 | ||
346 | info("Slot [%s] unregistered\n", slot_name(slot)); | 345 | pr_info("Slot [%s] unregistered\n", slot_name(slot)); |
347 | 346 | ||
348 | retval = pci_hp_deregister(slot->hotplug_slot); | 347 | retval = pci_hp_deregister(slot->hotplug_slot); |
349 | if (retval) | 348 | if (retval) |
350 | err("pci_hp_deregister failed with error %d\n", retval); | 349 | pr_err("pci_hp_deregister failed with error %d\n", retval); |
351 | } | 350 | } |
352 | 351 | ||
353 | 352 | ||
354 | void __init acpiphp_init(void) | 353 | void __init acpiphp_init(void) |
355 | { | 354 | { |
356 | info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n", | 355 | pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n", |
357 | acpiphp_disabled ? ", disabled by user; please report a bug" | 356 | acpiphp_disabled ? ", disabled by user; please report a bug" |
358 | : ""); | 357 | : ""); |
359 | } | 358 | } |
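The acpiphp_core.c changes above replace the driver's private dbg()/info()/err() macros with the generic pr_debug()/pr_info()/pr_err() helpers, relying on a pr_fmt() definition to keep the "acpiphp: " prefix. The snippet below is a userspace mock-up of that convention; the pr_* macros here are simplified stand-ins, not the kernel implementations.

/*
 * Userspace mock-up of the pr_fmt() convention: defining pr_fmt()
 * before the pr_*() helpers makes every message carry the subsystem
 * prefix that the removed dbg()/info()/err() macros used to add.
 */
#include <stdio.h>

#define pr_fmt(fmt) "acpiphp: " fmt

#define pr_info(fmt, ...)  printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	const char *slot = "3";

	pr_info("Slot [%s] registered\n", slot);	/* "acpiphp: Slot [3] registered" */
	pr_debug("%s - physical_slot = %s\n", __func__, slot);
	return 0;
}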
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 4a0a9ac7a1e5..5b4e9eb0e8ff 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -39,6 +39,8 @@ | |||
39 | * bus. It loses the refcount when the the driver unloads. | 39 | * bus. It loses the refcount when the the driver unloads. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | #define pr_fmt(fmt) "acpiphp_glue: " fmt | ||
43 | |||
42 | #include <linux/init.h> | 44 | #include <linux/init.h> |
43 | #include <linux/module.h> | 45 | #include <linux/module.h> |
44 | 46 | ||
@@ -58,8 +60,6 @@ static LIST_HEAD(bridge_list); | |||
58 | static DEFINE_MUTEX(bridge_mutex); | 60 | static DEFINE_MUTEX(bridge_mutex); |
59 | static DEFINE_MUTEX(acpiphp_context_lock); | 61 | static DEFINE_MUTEX(acpiphp_context_lock); |
60 | 62 | ||
61 | #define MY_NAME "acpiphp_glue" | ||
62 | |||
63 | static void handle_hotplug_event(acpi_handle handle, u32 type, void *data); | 63 | static void handle_hotplug_event(acpi_handle handle, u32 type, void *data); |
64 | static void acpiphp_sanitize_bus(struct pci_bus *bus); | 64 | static void acpiphp_sanitize_bus(struct pci_bus *bus); |
65 | static void acpiphp_set_hpp_values(struct pci_bus *bus); | 65 | static void acpiphp_set_hpp_values(struct pci_bus *bus); |
@@ -335,7 +335,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, | |||
335 | if (ACPI_FAILURE(status)) | 335 | if (ACPI_FAILURE(status)) |
336 | sun = bridge->nr_slots; | 336 | sun = bridge->nr_slots; |
337 | 337 | ||
338 | dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", | 338 | pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", |
339 | sun, pci_domain_nr(pbus), pbus->number, device); | 339 | sun, pci_domain_nr(pbus), pbus->number, device); |
340 | 340 | ||
341 | retval = acpiphp_register_hotplug_slot(slot, sun); | 341 | retval = acpiphp_register_hotplug_slot(slot, sun); |
@@ -343,10 +343,10 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, | |||
343 | slot->slot = NULL; | 343 | slot->slot = NULL; |
344 | bridge->nr_slots--; | 344 | bridge->nr_slots--; |
345 | if (retval == -EBUSY) | 345 | if (retval == -EBUSY) |
346 | warn("Slot %llu already registered by another " | 346 | pr_warn("Slot %llu already registered by another " |
347 | "hotplug driver\n", sun); | 347 | "hotplug driver\n", sun); |
348 | else | 348 | else |
349 | warn("acpiphp_register_hotplug_slot failed " | 349 | pr_warn("acpiphp_register_hotplug_slot failed " |
350 | "(err code = 0x%x)\n", retval); | 350 | "(err code = 0x%x)\n", retval); |
351 | } | 351 | } |
352 | /* Even if the slot registration fails, we can still use it. */ | 352 | /* Even if the slot registration fails, we can still use it. */ |
@@ -369,7 +369,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, | |||
369 | if (register_hotplug_dock_device(handle, | 369 | if (register_hotplug_dock_device(handle, |
370 | &acpiphp_dock_ops, context, | 370 | &acpiphp_dock_ops, context, |
371 | acpiphp_dock_init, acpiphp_dock_release)) | 371 | acpiphp_dock_init, acpiphp_dock_release)) |
372 | dbg("failed to register dock device\n"); | 372 | pr_debug("failed to register dock device\n"); |
373 | } | 373 | } |
374 | 374 | ||
375 | /* install notify handler */ | 375 | /* install notify handler */ |
@@ -427,7 +427,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) | |||
427 | ACPI_SYSTEM_NOTIFY, | 427 | ACPI_SYSTEM_NOTIFY, |
428 | handle_hotplug_event); | 428 | handle_hotplug_event); |
429 | if (ACPI_FAILURE(status)) | 429 | if (ACPI_FAILURE(status)) |
430 | err("failed to remove notify handler\n"); | 430 | pr_err("failed to remove notify handler\n"); |
431 | } | 431 | } |
432 | } | 432 | } |
433 | if (slot->slot) | 433 | if (slot->slot) |
@@ -826,8 +826,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) | |||
826 | switch (type) { | 826 | switch (type) { |
827 | case ACPI_NOTIFY_BUS_CHECK: | 827 | case ACPI_NOTIFY_BUS_CHECK: |
828 | /* bus re-enumerate */ | 828 | /* bus re-enumerate */ |
829 | dbg("%s: Bus check notify on %s\n", __func__, objname); | 829 | pr_debug("%s: Bus check notify on %s\n", __func__, objname); |
830 | dbg("%s: re-enumerating slots under %s\n", __func__, objname); | 830 | pr_debug("%s: re-enumerating slots under %s\n", |
831 | __func__, objname); | ||
831 | if (bridge) { | 832 | if (bridge) { |
832 | acpiphp_check_bridge(bridge); | 833 | acpiphp_check_bridge(bridge); |
833 | } else { | 834 | } else { |
@@ -841,7 +842,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) | |||
841 | 842 | ||
842 | case ACPI_NOTIFY_DEVICE_CHECK: | 843 | case ACPI_NOTIFY_DEVICE_CHECK: |
843 | /* device check */ | 844 | /* device check */ |
844 | dbg("%s: Device check notify on %s\n", __func__, objname); | 845 | pr_debug("%s: Device check notify on %s\n", __func__, objname); |
845 | if (bridge) { | 846 | if (bridge) { |
846 | acpiphp_check_bridge(bridge); | 847 | acpiphp_check_bridge(bridge); |
847 | } else { | 848 | } else { |
@@ -862,7 +863,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) | |||
862 | 863 | ||
863 | case ACPI_NOTIFY_EJECT_REQUEST: | 864 | case ACPI_NOTIFY_EJECT_REQUEST: |
864 | /* request device eject */ | 865 | /* request device eject */ |
865 | dbg("%s: Device eject notify on %s\n", __func__, objname); | 866 | pr_debug("%s: Device eject notify on %s\n", __func__, objname); |
866 | acpiphp_disable_and_eject_slot(func->slot); | 867 | acpiphp_disable_and_eject_slot(func->slot); |
867 | break; | 868 | break; |
868 | } | 869 | } |
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 2f5786c8522c..0d64c414bf78 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c | |||
@@ -25,6 +25,8 @@ | |||
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define pr_fmt(fmt) "acpiphp_ibm: " fmt | ||
29 | |||
28 | #include <linux/init.h> | 30 | #include <linux/init.h> |
29 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
30 | #include <linux/module.h> | 32 | #include <linux/module.h> |
@@ -43,23 +45,11 @@ | |||
43 | #define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" | 45 | #define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" |
44 | #define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension" | 46 | #define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension" |
45 | 47 | ||
46 | static bool debug; | ||
47 | 48 | ||
48 | MODULE_AUTHOR(DRIVER_AUTHOR); | 49 | MODULE_AUTHOR(DRIVER_AUTHOR); |
49 | MODULE_DESCRIPTION(DRIVER_DESC); | 50 | MODULE_DESCRIPTION(DRIVER_DESC); |
50 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
51 | MODULE_VERSION(DRIVER_VERSION); | 52 | MODULE_VERSION(DRIVER_VERSION); |
52 | module_param(debug, bool, 0644); | ||
53 | MODULE_PARM_DESC(debug, " Debugging mode enabled or not"); | ||
54 | #define MY_NAME "acpiphp_ibm" | ||
55 | |||
56 | #undef dbg | ||
57 | #define dbg(format, arg...) \ | ||
58 | do { \ | ||
59 | if (debug) \ | ||
60 | printk(KERN_DEBUG "%s: " format, \ | ||
61 | MY_NAME , ## arg); \ | ||
62 | } while (0) | ||
63 | 53 | ||
64 | #define FOUND_APCI 0x61504349 | 54 | #define FOUND_APCI 0x61504349 |
65 | /* these are the names for the IBM ACPI pseudo-device */ | 55 | /* these are the names for the IBM ACPI pseudo-device */ |
@@ -189,7 +179,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) | |||
189 | 179 | ||
190 | ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); | 180 | ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); |
191 | 181 | ||
192 | dbg("%s: set slot %d (%d) attention status to %d\n", __func__, | 182 | pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__, |
193 | ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, | 183 | ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, |
194 | (status ? 1 : 0)); | 184 | (status ? 1 : 0)); |
195 | 185 | ||
@@ -202,10 +192,10 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) | |||
202 | 192 | ||
203 | stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", ¶ms, &rc); | 193 | stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", ¶ms, &rc); |
204 | if (ACPI_FAILURE(stat)) { | 194 | if (ACPI_FAILURE(stat)) { |
205 | err("APLS evaluation failed: 0x%08x\n", stat); | 195 | pr_err("APLS evaluation failed: 0x%08x\n", stat); |
206 | return -ENODEV; | 196 | return -ENODEV; |
207 | } else if (!rc) { | 197 | } else if (!rc) { |
208 | err("APLS method failed: 0x%08llx\n", rc); | 198 | pr_err("APLS method failed: 0x%08llx\n", rc); |
209 | return -ERANGE; | 199 | return -ERANGE; |
210 | } | 200 | } |
211 | return 0; | 201 | return 0; |
@@ -234,7 +224,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status) | |||
234 | else | 224 | else |
235 | *status = 0; | 225 | *status = 0; |
236 | 226 | ||
237 | dbg("%s: get slot %d (%d) attention status is %d\n", __func__, | 227 | pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__, |
238 | ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, | 228 | ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, |
239 | *status); | 229 | *status); |
240 | 230 | ||
@@ -266,10 +256,10 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) | |||
266 | u8 subevent = event & 0xf0; | 256 | u8 subevent = event & 0xf0; |
267 | struct notification *note = context; | 257 | struct notification *note = context; |
268 | 258 | ||
269 | dbg("%s: Received notification %02x\n", __func__, event); | 259 | pr_debug("%s: Received notification %02x\n", __func__, event); |
270 | 260 | ||
271 | if (subevent == 0x80) { | 261 | if (subevent == 0x80) { |
272 | dbg("%s: generationg bus event\n", __func__); | 262 | pr_debug("%s: generationg bus event\n", __func__); |
273 | acpi_bus_generate_netlink_event(note->device->pnp.device_class, | 263 | acpi_bus_generate_netlink_event(note->device->pnp.device_class, |
274 | dev_name(¬e->device->dev), | 264 | dev_name(¬e->device->dev), |
275 | note->event, detail); | 265 | note->event, detail); |
@@ -301,7 +291,7 @@ static int ibm_get_table_from_acpi(char **bufp) | |||
301 | 291 | ||
302 | status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer); | 292 | status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer); |
303 | if (ACPI_FAILURE(status)) { | 293 | if (ACPI_FAILURE(status)) { |
304 | err("%s: APCI evaluation failed\n", __func__); | 294 | pr_err("%s: APCI evaluation failed\n", __func__); |
305 | return -ENODEV; | 295 | return -ENODEV; |
306 | } | 296 | } |
307 | 297 | ||
@@ -309,13 +299,13 @@ static int ibm_get_table_from_acpi(char **bufp) | |||
309 | if (!(package) || | 299 | if (!(package) || |
310 | (package->type != ACPI_TYPE_PACKAGE) || | 300 | (package->type != ACPI_TYPE_PACKAGE) || |
311 | !(package->package.elements)) { | 301 | !(package->package.elements)) { |
312 | err("%s: Invalid APCI object\n", __func__); | 302 | pr_err("%s: Invalid APCI object\n", __func__); |
313 | goto read_table_done; | 303 | goto read_table_done; |
314 | } | 304 | } |
315 | 305 | ||
316 | for(size = 0, i = 0; i < package->package.count; i++) { | 306 | for(size = 0, i = 0; i < package->package.count; i++) { |
317 | if (package->package.elements[i].type != ACPI_TYPE_BUFFER) { | 307 | if (package->package.elements[i].type != ACPI_TYPE_BUFFER) { |
318 | err("%s: Invalid APCI element %d\n", __func__, i); | 308 | pr_err("%s: Invalid APCI element %d\n", __func__, i); |
319 | goto read_table_done; | 309 | goto read_table_done; |
320 | } | 310 | } |
321 | size += package->package.elements[i].buffer.length; | 311 | size += package->package.elements[i].buffer.length; |
@@ -325,7 +315,7 @@ static int ibm_get_table_from_acpi(char **bufp) | |||
325 | goto read_table_done; | 315 | goto read_table_done; |
326 | 316 | ||
327 | lbuf = kzalloc(size, GFP_KERNEL); | 317 | lbuf = kzalloc(size, GFP_KERNEL); |
328 | dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n", | 318 | pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n", |
329 | __func__, package->package.count, size, lbuf); | 319 | __func__, package->package.count, size, lbuf); |
330 | 320 | ||
331 | if (lbuf) { | 321 | if (lbuf) { |
@@ -370,8 +360,8 @@ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj, | |||
370 | { | 360 | { |
371 | int bytes_read = -EINVAL; | 361 | int bytes_read = -EINVAL; |
372 | char *table = NULL; | 362 | char *table = NULL; |
373 | 363 | ||
374 | dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size); | 364 | pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size); |
375 | 365 | ||
376 | if (pos == 0) { | 366 | if (pos == 0) { |
377 | bytes_read = ibm_get_table_from_acpi(&table); | 367 | bytes_read = ibm_get_table_from_acpi(&table); |
@@ -403,7 +393,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, | |||
403 | 393 | ||
404 | status = acpi_get_object_info(handle, &info); | 394 | status = acpi_get_object_info(handle, &info); |
405 | if (ACPI_FAILURE(status)) { | 395 | if (ACPI_FAILURE(status)) { |
406 | err("%s: Failed to get device information status=0x%x\n", | 396 | pr_err("%s: Failed to get device information status=0x%x\n", |
407 | __func__, status); | 397 | __func__, status); |
408 | return retval; | 398 | return retval; |
409 | } | 399 | } |
@@ -411,7 +401,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, | |||
411 | if (info->current_status && (info->valid & ACPI_VALID_HID) && | 401 | if (info->current_status && (info->valid & ACPI_VALID_HID) && |
412 | (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || | 402 | (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || |
413 | !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) { | 403 | !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) { |
414 | dbg("found hardware: %s, handle: %p\n", | 404 | pr_debug("found hardware: %s, handle: %p\n", |
415 | info->hardware_id.string, handle); | 405 | info->hardware_id.string, handle); |
416 | *phandle = handle; | 406 | *phandle = handle; |
417 | /* returning non-zero causes the search to stop | 407 | /* returning non-zero causes the search to stop |
@@ -432,18 +422,18 @@ static int __init ibm_acpiphp_init(void) | |||
432 | struct acpi_device *device; | 422 | struct acpi_device *device; |
433 | struct kobject *sysdir = &pci_slots_kset->kobj; | 423 | struct kobject *sysdir = &pci_slots_kset->kobj; |
434 | 424 | ||
435 | dbg("%s\n", __func__); | 425 | pr_debug("%s\n", __func__); |
436 | 426 | ||
437 | if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | 427 | if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
438 | ACPI_UINT32_MAX, ibm_find_acpi_device, NULL, | 428 | ACPI_UINT32_MAX, ibm_find_acpi_device, NULL, |
439 | &ibm_acpi_handle, NULL) != FOUND_APCI) { | 429 | &ibm_acpi_handle, NULL) != FOUND_APCI) { |
440 | err("%s: acpi_walk_namespace failed\n", __func__); | 430 | pr_err("%s: acpi_walk_namespace failed\n", __func__); |
441 | retval = -ENODEV; | 431 | retval = -ENODEV; |
442 | goto init_return; | 432 | goto init_return; |
443 | } | 433 | } |
444 | dbg("%s: found IBM aPCI device\n", __func__); | 434 | pr_debug("%s: found IBM aPCI device\n", __func__); |
445 | if (acpi_bus_get_device(ibm_acpi_handle, &device)) { | 435 | if (acpi_bus_get_device(ibm_acpi_handle, &device)) { |
446 | err("%s: acpi_bus_get_device failed\n", __func__); | 436 | pr_err("%s: acpi_bus_get_device failed\n", __func__); |
447 | retval = -ENODEV; | 437 | retval = -ENODEV; |
448 | goto init_return; | 438 | goto init_return; |
449 | } | 439 | } |
@@ -457,7 +447,7 @@ static int __init ibm_acpiphp_init(void) | |||
457 | ACPI_DEVICE_NOTIFY, ibm_handle_events, | 447 | ACPI_DEVICE_NOTIFY, ibm_handle_events, |
458 | &ibm_note); | 448 | &ibm_note); |
459 | if (ACPI_FAILURE(status)) { | 449 | if (ACPI_FAILURE(status)) { |
460 | err("%s: Failed to register notification handler\n", | 450 | pr_err("%s: Failed to register notification handler\n", |
461 | __func__); | 451 | __func__); |
462 | retval = -EBUSY; | 452 | retval = -EBUSY; |
463 | goto init_cleanup; | 453 | goto init_cleanup; |
@@ -479,17 +469,17 @@ static void __exit ibm_acpiphp_exit(void) | |||
479 | acpi_status status; | 469 | acpi_status status; |
480 | struct kobject *sysdir = &pci_slots_kset->kobj; | 470 | struct kobject *sysdir = &pci_slots_kset->kobj; |
481 | 471 | ||
482 | dbg("%s\n", __func__); | 472 | pr_debug("%s\n", __func__); |
483 | 473 | ||
484 | if (acpiphp_unregister_attention(&ibm_attention_info)) | 474 | if (acpiphp_unregister_attention(&ibm_attention_info)) |
485 | err("%s: attention info deregistration failed", __func__); | 475 | pr_err("%s: attention info deregistration failed", __func__); |
486 | 476 | ||
487 | status = acpi_remove_notify_handler( | 477 | status = acpi_remove_notify_handler( |
488 | ibm_acpi_handle, | 478 | ibm_acpi_handle, |
489 | ACPI_DEVICE_NOTIFY, | 479 | ACPI_DEVICE_NOTIFY, |
490 | ibm_handle_events); | 480 | ibm_handle_events); |
491 | if (ACPI_FAILURE(status)) | 481 | if (ACPI_FAILURE(status)) |
492 | err("%s: Notification handler removal failed\n", __func__); | 482 | pr_err("%s: Notification handler removal failed\n", __func__); |
493 | /* remove the /sys entries */ | 483 | /* remove the /sys entries */ |
494 | sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr); | 484 | sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr); |
495 | } | 485 | } |
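The acpiphp_ibm changes above drop the driver-local dbg()/err() macros and the debug module parameter in favor of pr_debug()/pr_err(). Below is a minimal sketch of the usual idiom such conversions rely on; the prefix string and the example function are illustrative, not taken from the patch.

/* Defining pr_fmt() before any include gives every pr_*() call a common
 * prefix; pr_debug() is then gated by dynamic debug (or DEBUG) instead of
 * a driver-private "debug" parameter. */
#define pr_fmt(fmt) "acpiphp_ibm: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_report(int slot)
{
	pr_debug("%s: probing slot %d\n", __func__, slot);	/* no-op unless enabled */
	pr_err("%s: slot %d not responding\n", __func__, slot);
}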
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index e260f207a90e..d876e4b3c6a9 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -191,7 +191,7 @@ static inline const char *slot_name(struct slot *slot) | |||
191 | #include <linux/pci-acpi.h> | 191 | #include <linux/pci-acpi.h> |
192 | static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) | 192 | static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) |
193 | { | 193 | { |
194 | u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; | 194 | u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL; |
195 | return acpi_get_hp_hw_control_from_firmware(dev, flags); | 195 | return acpi_get_hp_hw_control_from_firmware(dev, flags); |
196 | } | 196 | } |
197 | #else | 197 | #else |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d5f90d6383bc..5e63645a7abe 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -185,7 +185,7 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control) | |||
185 | * reliably as devices without an INTx disable bit will then generate a | 185 | * reliably as devices without an INTx disable bit will then generate a |
186 | * level IRQ which will never be cleared. | 186 | * level IRQ which will never be cleared. |
187 | */ | 187 | */ |
188 | static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) | 188 | u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
189 | { | 189 | { |
190 | u32 mask_bits = desc->masked; | 190 | u32 mask_bits = desc->masked; |
191 | 191 | ||
@@ -199,9 +199,14 @@ static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) | |||
199 | return mask_bits; | 199 | return mask_bits; |
200 | } | 200 | } |
201 | 201 | ||
202 | __weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) | ||
203 | { | ||
204 | return default_msi_mask_irq(desc, mask, flag); | ||
205 | } | ||
206 | |||
202 | static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) | 207 | static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
203 | { | 208 | { |
204 | desc->masked = __msi_mask_irq(desc, mask, flag); | 209 | desc->masked = arch_msi_mask_irq(desc, mask, flag); |
205 | } | 210 | } |
206 | 211 | ||
207 | /* | 212 | /* |
@@ -211,7 +216,7 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) | |||
211 | * file. This saves a few milliseconds when initialising devices with lots | 216 | * file. This saves a few milliseconds when initialising devices with lots |
212 | * of MSI-X interrupts. | 217 | * of MSI-X interrupts. |
213 | */ | 218 | */ |
214 | static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag) | 219 | u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) |
215 | { | 220 | { |
216 | u32 mask_bits = desc->masked; | 221 | u32 mask_bits = desc->masked; |
217 | unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | 222 | unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + |
@@ -224,9 +229,14 @@ static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag) | |||
224 | return mask_bits; | 229 | return mask_bits; |
225 | } | 230 | } |
226 | 231 | ||
232 | __weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) | ||
233 | { | ||
234 | return default_msix_mask_irq(desc, flag); | ||
235 | } | ||
236 | |||
227 | static void msix_mask_irq(struct msi_desc *desc, u32 flag) | 237 | static void msix_mask_irq(struct msi_desc *desc, u32 flag) |
228 | { | 238 | { |
229 | desc->masked = __msix_mask_irq(desc, flag); | 239 | desc->masked = arch_msix_mask_irq(desc, flag); |
230 | } | 240 | } |
231 | 241 | ||
232 | static void msi_set_mask_bit(struct irq_data *data, u32 flag) | 242 | static void msi_set_mask_bit(struct irq_data *data, u32 flag) |
@@ -831,7 +841,7 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) | |||
831 | int status, maxvec; | 841 | int status, maxvec; |
832 | u16 msgctl; | 842 | u16 msgctl; |
833 | 843 | ||
834 | if (!dev->msi_cap) | 844 | if (!dev->msi_cap || dev->current_state != PCI_D0) |
835 | return -EINVAL; | 845 | return -EINVAL; |
836 | 846 | ||
837 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); | 847 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); |
@@ -862,7 +872,7 @@ int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec) | |||
862 | int ret, nvec; | 872 | int ret, nvec; |
863 | u16 msgctl; | 873 | u16 msgctl; |
864 | 874 | ||
865 | if (!dev->msi_cap) | 875 | if (!dev->msi_cap || dev->current_state != PCI_D0) |
866 | return -EINVAL; | 876 | return -EINVAL; |
867 | 877 | ||
868 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); | 878 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); |
@@ -902,7 +912,7 @@ void pci_msi_shutdown(struct pci_dev *dev) | |||
902 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl); | 912 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl); |
903 | mask = msi_capable_mask(ctrl); | 913 | mask = msi_capable_mask(ctrl); |
904 | /* Keep cached state to be restored */ | 914 | /* Keep cached state to be restored */ |
905 | __msi_mask_irq(desc, mask, ~mask); | 915 | arch_msi_mask_irq(desc, mask, ~mask); |
906 | 916 | ||
907 | /* Restore dev->irq to its default pin-assertion irq */ | 917 | /* Restore dev->irq to its default pin-assertion irq */ |
908 | dev->irq = desc->msi_attrib.default_irq; | 918 | dev->irq = desc->msi_attrib.default_irq; |
@@ -955,7 +965,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) | |||
955 | int status, nr_entries; | 965 | int status, nr_entries; |
956 | int i, j; | 966 | int i, j; |
957 | 967 | ||
958 | if (!entries || !dev->msix_cap) | 968 | if (!entries || !dev->msix_cap || dev->current_state != PCI_D0) |
959 | return -EINVAL; | 969 | return -EINVAL; |
960 | 970 | ||
961 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); | 971 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); |
@@ -998,7 +1008,7 @@ void pci_msix_shutdown(struct pci_dev *dev) | |||
998 | /* Return the device with MSI-X masked as initial states */ | 1008 | /* Return the device with MSI-X masked as initial states */ |
999 | list_for_each_entry(entry, &dev->msi_list, list) { | 1009 | list_for_each_entry(entry, &dev->msi_list, list) { |
1000 | /* Keep cached states to be restored */ | 1010 | /* Keep cached states to be restored */ |
1001 | __msix_mask_irq(entry, 1); | 1011 | arch_msix_mask_irq(entry, 1); |
1002 | } | 1012 | } |
1003 | 1013 | ||
1004 | msix_set_enable(dev, 0); | 1014 | msix_set_enable(dev, 0); |
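The msi.c hunks above rename the masking helpers to default_msi_mask_irq()/default_msix_mask_irq() and call them through __weak arch_* wrappers, so an architecture can interpose on MSI/MSI-X masking. A rough sketch of what an architecture override could look like, assuming the default helpers are declared in <linux/msi.h> as the rest of this series does; the platform-specific branch is only a comment.

#include <linux/msi.h>
#include <linux/pci.h>

/* A non-weak definition in arch code overrides the weak one in
 * drivers/pci/msi.c at link time. */
u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	/* Platform-specific masking (e.g. at the interrupt controller)
	 * would go here; otherwise keep the generic config-space write. */
	return default_msi_mask_irq(desc, mask, flag);
}

u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	return default_msix_mask_irq(desc, flag);
}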
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 38f3c0140dfb..454853507b7e 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -267,11 +267,19 @@ static long local_pci_probe(void *_ddi) | |||
267 | pm_runtime_get_sync(dev); | 267 | pm_runtime_get_sync(dev); |
268 | pci_dev->driver = pci_drv; | 268 | pci_dev->driver = pci_drv; |
269 | rc = pci_drv->probe(pci_dev, ddi->id); | 269 | rc = pci_drv->probe(pci_dev, ddi->id); |
270 | if (rc) { | 270 | if (!rc) |
271 | return rc; | ||
272 | if (rc < 0) { | ||
271 | pci_dev->driver = NULL; | 273 | pci_dev->driver = NULL; |
272 | pm_runtime_put_sync(dev); | 274 | pm_runtime_put_sync(dev); |
275 | return rc; | ||
273 | } | 276 | } |
274 | return rc; | 277 | /* |
278 | * Probe function should return < 0 for failure, 0 for success | ||
279 | * Treat values > 0 as success, but warn. | ||
280 | */ | ||
281 | dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc); | ||
282 | return 0; | ||
275 | } | 283 | } |
276 | 284 | ||
277 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | 285 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, |
@@ -602,18 +610,10 @@ static int pci_pm_prepare(struct device *dev) | |||
602 | return error; | 610 | return error; |
603 | } | 611 | } |
604 | 612 | ||
605 | static void pci_pm_complete(struct device *dev) | ||
606 | { | ||
607 | struct device_driver *drv = dev->driver; | ||
608 | |||
609 | if (drv && drv->pm && drv->pm->complete) | ||
610 | drv->pm->complete(dev); | ||
611 | } | ||
612 | 613 | ||
613 | #else /* !CONFIG_PM_SLEEP */ | 614 | #else /* !CONFIG_PM_SLEEP */ |
614 | 615 | ||
615 | #define pci_pm_prepare NULL | 616 | #define pci_pm_prepare NULL |
616 | #define pci_pm_complete NULL | ||
617 | 617 | ||
618 | #endif /* !CONFIG_PM_SLEEP */ | 618 | #endif /* !CONFIG_PM_SLEEP */ |
619 | 619 | ||
@@ -1124,9 +1124,8 @@ static int pci_pm_runtime_idle(struct device *dev) | |||
1124 | 1124 | ||
1125 | #ifdef CONFIG_PM | 1125 | #ifdef CONFIG_PM |
1126 | 1126 | ||
1127 | const struct dev_pm_ops pci_dev_pm_ops = { | 1127 | static const struct dev_pm_ops pci_dev_pm_ops = { |
1128 | .prepare = pci_pm_prepare, | 1128 | .prepare = pci_pm_prepare, |
1129 | .complete = pci_pm_complete, | ||
1130 | .suspend = pci_pm_suspend, | 1129 | .suspend = pci_pm_suspend, |
1131 | .resume = pci_pm_resume, | 1130 | .resume = pci_pm_resume, |
1132 | .freeze = pci_pm_freeze, | 1131 | .freeze = pci_pm_freeze, |
@@ -1319,7 +1318,7 @@ struct bus_type pci_bus_type = { | |||
1319 | .probe = pci_device_probe, | 1318 | .probe = pci_device_probe, |
1320 | .remove = pci_device_remove, | 1319 | .remove = pci_device_remove, |
1321 | .shutdown = pci_device_shutdown, | 1320 | .shutdown = pci_device_shutdown, |
1322 | .dev_attrs = pci_dev_attrs, | 1321 | .dev_groups = pci_dev_groups, |
1323 | .bus_groups = pci_bus_groups, | 1322 | .bus_groups = pci_bus_groups, |
1324 | .drv_groups = pci_drv_groups, | 1323 | .drv_groups = pci_drv_groups, |
1325 | .pm = PCI_PM_OPS_PTR, | 1324 | .pm = PCI_PM_OPS_PTR, |
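With the local_pci_probe() change above, a driver's probe() is expected to return a negative errno on failure and 0 on success; positive values no longer leak out but are warned about and treated as success. A small sketch of the convention, using a made-up probe function:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;	/* < 0: binding fails, driver pointer is cleared */

	return 0;		/* 0: success; > 0 would now trigger dev_warn() */
}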
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index d8eb880bd1fc..2aaa83c85a4e 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -42,7 +42,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ | |||
42 | \ | 42 | \ |
43 | pdev = to_pci_dev (dev); \ | 43 | pdev = to_pci_dev (dev); \ |
44 | return sprintf (buf, format_string, pdev->field); \ | 44 | return sprintf (buf, format_string, pdev->field); \ |
45 | } | 45 | } \ |
46 | static DEVICE_ATTR_RO(field) | ||
46 | 47 | ||
47 | pci_config_attr(vendor, "0x%04x\n"); | 48 | pci_config_attr(vendor, "0x%04x\n"); |
48 | pci_config_attr(device, "0x%04x\n"); | 49 | pci_config_attr(device, "0x%04x\n"); |
@@ -73,10 +74,13 @@ static ssize_t broken_parity_status_store(struct device *dev, | |||
73 | 74 | ||
74 | return count; | 75 | return count; |
75 | } | 76 | } |
77 | static DEVICE_ATTR_RW(broken_parity_status); | ||
76 | 78 | ||
77 | static ssize_t local_cpus_show(struct device *dev, | 79 | static ssize_t pci_dev_show_local_cpu(struct device *dev, |
78 | struct device_attribute *attr, char *buf) | 80 | int type, |
79 | { | 81 | struct device_attribute *attr, |
82 | char *buf) | ||
83 | { | ||
80 | const struct cpumask *mask; | 84 | const struct cpumask *mask; |
81 | int len; | 85 | int len; |
82 | 86 | ||
@@ -86,30 +90,28 @@ static ssize_t local_cpus_show(struct device *dev, | |||
86 | #else | 90 | #else |
87 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); | 91 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); |
88 | #endif | 92 | #endif |
89 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); | 93 | len = type ? |
94 | cpumask_scnprintf(buf, PAGE_SIZE-2, mask) : | ||
95 | cpulist_scnprintf(buf, PAGE_SIZE-2, mask); | ||
96 | |||
90 | buf[len++] = '\n'; | 97 | buf[len++] = '\n'; |
91 | buf[len] = '\0'; | 98 | buf[len] = '\0'; |
92 | return len; | 99 | return len; |
93 | } | 100 | } |
94 | 101 | ||
102 | static ssize_t local_cpus_show(struct device *dev, | ||
103 | struct device_attribute *attr, char *buf) | ||
104 | { | ||
105 | return pci_dev_show_local_cpu(dev, 1, attr, buf); | ||
106 | } | ||
107 | static DEVICE_ATTR_RO(local_cpus); | ||
95 | 108 | ||
96 | static ssize_t local_cpulist_show(struct device *dev, | 109 | static ssize_t local_cpulist_show(struct device *dev, |
97 | struct device_attribute *attr, char *buf) | 110 | struct device_attribute *attr, char *buf) |
98 | { | 111 | { |
99 | const struct cpumask *mask; | 112 | return pci_dev_show_local_cpu(dev, 0, attr, buf); |
100 | int len; | ||
101 | |||
102 | #ifdef CONFIG_NUMA | ||
103 | mask = (dev_to_node(dev) == -1) ? cpu_online_mask : | ||
104 | cpumask_of_node(dev_to_node(dev)); | ||
105 | #else | ||
106 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); | ||
107 | #endif | ||
108 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); | ||
109 | buf[len++] = '\n'; | ||
110 | buf[len] = '\0'; | ||
111 | return len; | ||
112 | } | 113 | } |
114 | static DEVICE_ATTR_RO(local_cpulist); | ||
113 | 115 | ||
114 | /* | 116 | /* |
115 | * PCI Bus Class Devices | 117 | * PCI Bus Class Devices |
@@ -170,6 +172,7 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf) | |||
170 | } | 172 | } |
171 | return (str - buf); | 173 | return (str - buf); |
172 | } | 174 | } |
175 | static DEVICE_ATTR_RO(resource); | ||
173 | 176 | ||
174 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) | 177 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) |
175 | { | 178 | { |
@@ -181,10 +184,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |||
181 | (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), | 184 | (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), |
182 | (u8)(pci_dev->class)); | 185 | (u8)(pci_dev->class)); |
183 | } | 186 | } |
187 | static DEVICE_ATTR_RO(modalias); | ||
184 | 188 | ||
185 | static ssize_t is_enabled_store(struct device *dev, | 189 | static ssize_t enabled_store(struct device *dev, |
186 | struct device_attribute *attr, const char *buf, | 190 | struct device_attribute *attr, const char *buf, |
187 | size_t count) | 191 | size_t count) |
188 | { | 192 | { |
189 | struct pci_dev *pdev = to_pci_dev(dev); | 193 | struct pci_dev *pdev = to_pci_dev(dev); |
190 | unsigned long val; | 194 | unsigned long val; |
@@ -208,14 +212,15 @@ static ssize_t is_enabled_store(struct device *dev, | |||
208 | return result < 0 ? result : count; | 212 | return result < 0 ? result : count; |
209 | } | 213 | } |
210 | 214 | ||
211 | static ssize_t is_enabled_show(struct device *dev, | 215 | static ssize_t enabled_show(struct device *dev, |
212 | struct device_attribute *attr, char *buf) | 216 | struct device_attribute *attr, char *buf) |
213 | { | 217 | { |
214 | struct pci_dev *pdev; | 218 | struct pci_dev *pdev; |
215 | 219 | ||
216 | pdev = to_pci_dev (dev); | 220 | pdev = to_pci_dev (dev); |
217 | return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt)); | 221 | return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt)); |
218 | } | 222 | } |
223 | static DEVICE_ATTR_RW(enabled); | ||
219 | 224 | ||
220 | #ifdef CONFIG_NUMA | 225 | #ifdef CONFIG_NUMA |
221 | static ssize_t | 226 | static ssize_t |
@@ -223,6 +228,7 @@ numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
223 | { | 228 | { |
224 | return sprintf (buf, "%d\n", dev->numa_node); | 229 | return sprintf (buf, "%d\n", dev->numa_node); |
225 | } | 230 | } |
231 | static DEVICE_ATTR_RO(numa_node); | ||
226 | #endif | 232 | #endif |
227 | 233 | ||
228 | static ssize_t | 234 | static ssize_t |
@@ -232,6 +238,7 @@ dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
232 | 238 | ||
233 | return sprintf (buf, "%d\n", fls64(pdev->dma_mask)); | 239 | return sprintf (buf, "%d\n", fls64(pdev->dma_mask)); |
234 | } | 240 | } |
241 | static DEVICE_ATTR_RO(dma_mask_bits); | ||
235 | 242 | ||
236 | static ssize_t | 243 | static ssize_t |
237 | consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr, | 244 | consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr, |
@@ -239,6 +246,7 @@ consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr, | |||
239 | { | 246 | { |
240 | return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask)); | 247 | return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask)); |
241 | } | 248 | } |
249 | static DEVICE_ATTR_RO(consistent_dma_mask_bits); | ||
242 | 250 | ||
243 | static ssize_t | 251 | static ssize_t |
244 | msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf) | 252 | msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -283,6 +291,7 @@ msi_bus_store(struct device *dev, struct device_attribute *attr, | |||
283 | 291 | ||
284 | return count; | 292 | return count; |
285 | } | 293 | } |
294 | static DEVICE_ATTR_RW(msi_bus); | ||
286 | 295 | ||
287 | static DEFINE_MUTEX(pci_remove_rescan_mutex); | 296 | static DEFINE_MUTEX(pci_remove_rescan_mutex); |
288 | static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, | 297 | static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, |
@@ -304,7 +313,7 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, | |||
304 | } | 313 | } |
305 | static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); | 314 | static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); |
306 | 315 | ||
307 | struct attribute *pci_bus_attrs[] = { | 316 | static struct attribute *pci_bus_attrs[] = { |
308 | &bus_attr_rescan.attr, | 317 | &bus_attr_rescan.attr, |
309 | NULL, | 318 | NULL, |
310 | }; | 319 | }; |
@@ -335,8 +344,9 @@ dev_rescan_store(struct device *dev, struct device_attribute *attr, | |||
335 | } | 344 | } |
336 | return count; | 345 | return count; |
337 | } | 346 | } |
338 | struct device_attribute dev_rescan_attr = __ATTR(rescan, (S_IWUSR|S_IWGRP), | 347 | static struct device_attribute dev_rescan_attr = __ATTR(rescan, |
339 | NULL, dev_rescan_store); | 348 | (S_IWUSR|S_IWGRP), |
349 | NULL, dev_rescan_store); | ||
340 | 350 | ||
341 | static void remove_callback(struct device *dev) | 351 | static void remove_callback(struct device *dev) |
342 | { | 352 | { |
@@ -366,8 +376,9 @@ remove_store(struct device *dev, struct device_attribute *dummy, | |||
366 | count = ret; | 376 | count = ret; |
367 | return count; | 377 | return count; |
368 | } | 378 | } |
369 | struct device_attribute dev_remove_attr = __ATTR(remove, (S_IWUSR|S_IWGRP), | 379 | static struct device_attribute dev_remove_attr = __ATTR(remove, |
370 | NULL, remove_store); | 380 | (S_IWUSR|S_IWGRP), |
381 | NULL, remove_store); | ||
371 | 382 | ||
372 | static ssize_t | 383 | static ssize_t |
373 | dev_bus_rescan_store(struct device *dev, struct device_attribute *attr, | 384 | dev_bus_rescan_store(struct device *dev, struct device_attribute *attr, |
@@ -414,6 +425,7 @@ static ssize_t d3cold_allowed_show(struct device *dev, | |||
414 | struct pci_dev *pdev = to_pci_dev(dev); | 425 | struct pci_dev *pdev = to_pci_dev(dev); |
415 | return sprintf (buf, "%u\n", pdev->d3cold_allowed); | 426 | return sprintf (buf, "%u\n", pdev->d3cold_allowed); |
416 | } | 427 | } |
428 | static DEVICE_ATTR_RW(d3cold_allowed); | ||
417 | #endif | 429 | #endif |
418 | 430 | ||
419 | #ifdef CONFIG_PCI_IOV | 431 | #ifdef CONFIG_PCI_IOV |
@@ -499,30 +511,38 @@ static struct device_attribute sriov_numvfs_attr = | |||
499 | sriov_numvfs_show, sriov_numvfs_store); | 511 | sriov_numvfs_show, sriov_numvfs_store); |
500 | #endif /* CONFIG_PCI_IOV */ | 512 | #endif /* CONFIG_PCI_IOV */ |
501 | 513 | ||
502 | struct device_attribute pci_dev_attrs[] = { | 514 | static struct attribute *pci_dev_attrs[] = { |
503 | __ATTR_RO(resource), | 515 | &dev_attr_resource.attr, |
504 | __ATTR_RO(vendor), | 516 | &dev_attr_vendor.attr, |
505 | __ATTR_RO(device), | 517 | &dev_attr_device.attr, |
506 | __ATTR_RO(subsystem_vendor), | 518 | &dev_attr_subsystem_vendor.attr, |
507 | __ATTR_RO(subsystem_device), | 519 | &dev_attr_subsystem_device.attr, |
508 | __ATTR_RO(class), | 520 | &dev_attr_class.attr, |
509 | __ATTR_RO(irq), | 521 | &dev_attr_irq.attr, |
510 | __ATTR_RO(local_cpus), | 522 | &dev_attr_local_cpus.attr, |
511 | __ATTR_RO(local_cpulist), | 523 | &dev_attr_local_cpulist.attr, |
512 | __ATTR_RO(modalias), | 524 | &dev_attr_modalias.attr, |
513 | #ifdef CONFIG_NUMA | 525 | #ifdef CONFIG_NUMA |
514 | __ATTR_RO(numa_node), | 526 | &dev_attr_numa_node.attr, |
515 | #endif | 527 | #endif |
516 | __ATTR_RO(dma_mask_bits), | 528 | &dev_attr_dma_mask_bits.attr, |
517 | __ATTR_RO(consistent_dma_mask_bits), | 529 | &dev_attr_consistent_dma_mask_bits.attr, |
518 | __ATTR(enable, 0600, is_enabled_show, is_enabled_store), | 530 | &dev_attr_enabled.attr, |
519 | __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), | 531 | &dev_attr_broken_parity_status.attr, |
520 | broken_parity_status_show,broken_parity_status_store), | 532 | &dev_attr_msi_bus.attr, |
521 | __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store), | ||
522 | #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) | 533 | #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) |
523 | __ATTR(d3cold_allowed, 0644, d3cold_allowed_show, d3cold_allowed_store), | 534 | &dev_attr_d3cold_allowed.attr, |
524 | #endif | 535 | #endif |
525 | __ATTR_NULL, | 536 | NULL, |
537 | }; | ||
538 | |||
539 | static const struct attribute_group pci_dev_group = { | ||
540 | .attrs = pci_dev_attrs, | ||
541 | }; | ||
542 | |||
543 | const struct attribute_group *pci_dev_groups[] = { | ||
544 | &pci_dev_group, | ||
545 | NULL, | ||
526 | }; | 546 | }; |
527 | 547 | ||
528 | static struct attribute *pcibus_attrs[] = { | 548 | static struct attribute *pcibus_attrs[] = { |
@@ -554,7 +574,7 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
554 | !!(pdev->resource[PCI_ROM_RESOURCE].flags & | 574 | !!(pdev->resource[PCI_ROM_RESOURCE].flags & |
555 | IORESOURCE_ROM_SHADOW)); | 575 | IORESOURCE_ROM_SHADOW)); |
556 | } | 576 | } |
557 | struct device_attribute vga_attr = __ATTR_RO(boot_vga); | 577 | static struct device_attribute vga_attr = __ATTR_RO(boot_vga); |
558 | 578 | ||
559 | static ssize_t | 579 | static ssize_t |
560 | pci_read_config(struct file *filp, struct kobject *kobj, | 580 | pci_read_config(struct file *filp, struct kobject *kobj, |
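The pci-sysfs.c conversion above replaces the bus_type.dev_attrs array with DEVICE_ATTR_*() declarations collected into an attribute group and exposed through pci_dev_groups[]. A condensed sketch of that pattern with a made-up attribute name:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(demo);		/* expects demo_show(), emits dev_attr_demo */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

/* A bus or class then points .dev_groups at a NULL-terminated list: */
static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL,
};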
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index bdd64b1b4817..b127fbda6fc8 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1148,18 +1148,16 @@ int pci_reenable_device(struct pci_dev *dev) | |||
1148 | 1148 | ||
1149 | static void pci_enable_bridge(struct pci_dev *dev) | 1149 | static void pci_enable_bridge(struct pci_dev *dev) |
1150 | { | 1150 | { |
1151 | struct pci_dev *bridge; | ||
1151 | int retval; | 1152 | int retval; |
1152 | 1153 | ||
1153 | if (!dev) | 1154 | bridge = pci_upstream_bridge(dev); |
1154 | return; | 1155 | if (bridge) |
1155 | 1156 | pci_enable_bridge(bridge); | |
1156 | pci_enable_bridge(dev->bus->self); | ||
1157 | 1157 | ||
1158 | if (pci_is_enabled(dev)) { | 1158 | if (pci_is_enabled(dev)) { |
1159 | if (!dev->is_busmaster) { | 1159 | if (!dev->is_busmaster) |
1160 | dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n"); | ||
1161 | pci_set_master(dev); | 1160 | pci_set_master(dev); |
1162 | } | ||
1163 | return; | 1161 | return; |
1164 | } | 1162 | } |
1165 | 1163 | ||
@@ -1172,6 +1170,7 @@ static void pci_enable_bridge(struct pci_dev *dev) | |||
1172 | 1170 | ||
1173 | static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) | 1171 | static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) |
1174 | { | 1172 | { |
1173 | struct pci_dev *bridge; | ||
1175 | int err; | 1174 | int err; |
1176 | int i, bars = 0; | 1175 | int i, bars = 0; |
1177 | 1176 | ||
@@ -1190,7 +1189,9 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) | |||
1190 | if (atomic_inc_return(&dev->enable_cnt) > 1) | 1189 | if (atomic_inc_return(&dev->enable_cnt) > 1) |
1191 | return 0; /* already enabled */ | 1190 | return 0; /* already enabled */ |
1192 | 1191 | ||
1193 | pci_enable_bridge(dev->bus->self); | 1192 | bridge = pci_upstream_bridge(dev); |
1193 | if (bridge) | ||
1194 | pci_enable_bridge(bridge); | ||
1194 | 1195 | ||
1195 | /* only skip sriov related */ | 1196 | /* only skip sriov related */ |
1196 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) | 1197 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) |
@@ -1644,8 +1645,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1644 | if (enable) { | 1645 | if (enable) { |
1645 | pme_dev = kmalloc(sizeof(struct pci_pme_device), | 1646 | pme_dev = kmalloc(sizeof(struct pci_pme_device), |
1646 | GFP_KERNEL); | 1647 | GFP_KERNEL); |
1647 | if (!pme_dev) | 1648 | if (!pme_dev) { |
1648 | goto out; | 1649 | dev_warn(&dev->dev, "can't enable PME#\n"); |
1650 | return; | ||
1651 | } | ||
1649 | pme_dev->dev = dev; | 1652 | pme_dev->dev = dev; |
1650 | mutex_lock(&pci_pme_list_mutex); | 1653 | mutex_lock(&pci_pme_list_mutex); |
1651 | list_add(&pme_dev->list, &pci_pme_list); | 1654 | list_add(&pme_dev->list, &pci_pme_list); |
@@ -1666,7 +1669,6 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1666 | } | 1669 | } |
1667 | } | 1670 | } |
1668 | 1671 | ||
1669 | out: | ||
1670 | dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled"); | 1672 | dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled"); |
1671 | } | 1673 | } |
1672 | 1674 | ||
@@ -2860,7 +2862,7 @@ void __weak pcibios_set_master(struct pci_dev *dev) | |||
2860 | lat = pcibios_max_latency; | 2862 | lat = pcibios_max_latency; |
2861 | else | 2863 | else |
2862 | return; | 2864 | return; |
2863 | dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat); | 2865 | |
2864 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | 2866 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); |
2865 | } | 2867 | } |
2866 | 2868 | ||
@@ -3978,6 +3980,7 @@ int pcie_get_mps(struct pci_dev *dev) | |||
3978 | 3980 | ||
3979 | return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); | 3981 | return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); |
3980 | } | 3982 | } |
3983 | EXPORT_SYMBOL(pcie_get_mps); | ||
3981 | 3984 | ||
3982 | /** | 3985 | /** |
3983 | * pcie_set_mps - set PCI Express maximum payload size | 3986 | * pcie_set_mps - set PCI Express maximum payload size |
@@ -4002,6 +4005,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps) | |||
4002 | return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, | 4005 | return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
4003 | PCI_EXP_DEVCTL_PAYLOAD, v); | 4006 | PCI_EXP_DEVCTL_PAYLOAD, v); |
4004 | } | 4007 | } |
4008 | EXPORT_SYMBOL(pcie_set_mps); | ||
4005 | 4009 | ||
4006 | /** | 4010 | /** |
4007 | * pcie_get_minimum_link - determine minimum link settings of a PCI device | 4011 | * pcie_get_minimum_link - determine minimum link settings of a PCI device |
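pcie_get_mps() and pcie_set_mps() are exported above, so modular drivers can manage Max_Payload_Size through the helpers instead of open-coding PCI_EXP_DEVCTL updates. A brief sketch, with an arbitrary 256-byte target:

#include <linux/pci.h>

static void demo_tune_mps(struct pci_dev *pdev)
{
	int mps = pcie_get_mps(pdev);		/* current payload size in bytes */

	if (mps > 256)
		pcie_set_mps(pdev, 256);	/* must be a power of two the
						 * device actually supports */
}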
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 607be58dd728..9c91ecc1301b 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -153,7 +153,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev) | |||
153 | return (dev->no_d1d2 || parent_dstates); | 153 | return (dev->no_d1d2 || parent_dstates); |
154 | 154 | ||
155 | } | 155 | } |
156 | extern struct device_attribute pci_dev_attrs[]; | 156 | extern const struct attribute_group *pci_dev_groups[]; |
157 | extern const struct attribute_group *pcibus_groups[]; | 157 | extern const struct attribute_group *pcibus_groups[]; |
158 | extern struct device_type pci_dev_type; | 158 | extern struct device_type pci_dev_type; |
159 | extern const struct attribute_group *pci_bus_groups[]; | 159 | extern const struct attribute_group *pci_bus_groups[]; |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 31063ac30992..08d131f7815b 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -260,13 +260,14 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
260 | if (pcie_ports_disabled) | 260 | if (pcie_ports_disabled) |
261 | return 0; | 261 | return 0; |
262 | 262 | ||
263 | err = pcie_port_platform_notify(dev, &cap_mask); | 263 | cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
264 | if (!pcie_ports_auto) { | 264 | | PCIE_PORT_SERVICE_VC; |
265 | cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | 265 | if (pci_aer_available()) |
266 | | PCIE_PORT_SERVICE_VC; | 266 | cap_mask |= PCIE_PORT_SERVICE_AER; |
267 | if (pci_aer_available()) | 267 | |
268 | cap_mask |= PCIE_PORT_SERVICE_AER; | 268 | if (pcie_ports_auto) { |
269 | } else if (err) { | 269 | err = pcie_port_platform_notify(dev, &cap_mask); |
270 | if (err) | ||
270 | return 0; | 271 | return 0; |
271 | } | 272 | } |
272 | 273 | ||
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7ef0f868b3e0..5e14f5a51357 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -641,8 +641,7 @@ static void pci_set_bus_speed(struct pci_bus *bus) | |||
641 | return; | 641 | return; |
642 | } | 642 | } |
643 | 643 | ||
644 | pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); | 644 | if (pci_is_pcie(bridge)) { |
645 | if (pos) { | ||
646 | u32 linkcap; | 645 | u32 linkcap; |
647 | u16 linksta; | 646 | u16 linksta; |
648 | 647 | ||
@@ -984,7 +983,6 @@ void set_pcie_port_type(struct pci_dev *pdev) | |||
984 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 983 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
985 | if (!pos) | 984 | if (!pos) |
986 | return; | 985 | return; |
987 | pdev->is_pcie = 1; | ||
988 | pdev->pcie_cap = pos; | 986 | pdev->pcie_cap = pos; |
989 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | 987 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
990 | pdev->pcie_flags_reg = reg16; | 988 | pdev->pcie_flags_reg = reg16; |
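Several hunks in this series (probe.c above, and the qla2xxx, bfa and et131x changes further down) replace pci_find_capability(dev, PCI_CAP_ID_EXP) tests and the old is_pcie flag with pci_is_pcie(), which checks the pcie_cap offset cached by set_pcie_port_type(). A tiny sketch of the intended usage:

#include <linux/pci.h>

static bool demo_is_pcie_endpoint(struct pci_dev *pdev)
{
	/* No config-space walk: both helpers use the cached capability. */
	return pci_is_pcie(pdev) &&
	       pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT;
}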
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f6c31fabf3af..91490453c229 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2955,6 +2955,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); | |||
2955 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); | 2955 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); |
2956 | 2956 | ||
2957 | /* | 2957 | /* |
2958 | * PCI devices which are on Intel chips can skip the 10ms delay | ||
2959 | * before entering D3 mode. | ||
2960 | */ | ||
2961 | static void quirk_remove_d3_delay(struct pci_dev *dev) | ||
2962 | { | ||
2963 | dev->d3_delay = 0; | ||
2964 | } | ||
2965 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay); | ||
2966 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay); | ||
2967 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay); | ||
2968 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay); | ||
2969 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay); | ||
2970 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay); | ||
2971 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay); | ||
2972 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay); | ||
2973 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay); | ||
2974 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay); | ||
2975 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay); | ||
2976 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay); | ||
2977 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay); | ||
2978 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay); | ||
2979 | |||
2980 | /* | ||
2958 | * Some devices may pass our check in pci_intx_mask_supported if | 2981 | * Some devices may pass our check in pci_intx_mask_supported if |
2959 | * PCI_COMMAND_INTX_DISABLE works though they actually do not properly | 2982 | * PCI_COMMAND_INTX_DISABLE works though they actually do not properly |
2960 | * support this feature. | 2983 | * support this feature. |
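The quirk_remove_d3_delay() fixup added above clears pci_dev->d3_delay, which otherwise imposes a 10 ms settle delay around D3hot transitions. A sketch of the same fixup mechanism, using a placeholder device ID rather than one from the patch:

#include <linux/pci.h>

static void demo_remove_d3_delay(struct pci_dev *dev)
{
	dev->d3_delay = 0;	/* skip the post-transition msleep() for this device */
}
/* FINAL-stage fixups run after the device is fully enumerated. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, demo_remove_d3_delay);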
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index bc26d7990cc3..4ce83b26ae9e 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -982,7 +982,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
982 | } | 982 | } |
983 | 983 | ||
984 | min_align = calculate_mem_align(aligns, max_order); | 984 | min_align = calculate_mem_align(aligns, max_order); |
985 | min_align = max(min_align, window_alignment(bus, b_res->flags & mask)); | 985 | min_align = max(min_align, window_alignment(bus, b_res->flags)); |
986 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); | 986 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
987 | if (children_add_size > add_size) | 987 | if (children_add_size > add_size) |
988 | add_size = children_add_size; | 988 | add_size = children_add_size; |
@@ -1136,7 +1136,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
1136 | } | 1136 | } |
1137 | 1137 | ||
1138 | /* The root bus? */ | 1138 | /* The root bus? */ |
1139 | if (!bus->self) | 1139 | if (pci_is_root_bus(bus)) |
1140 | return; | 1140 | return; |
1141 | 1141 | ||
1142 | switch (bus->self->class >> 8) { | 1142 | switch (bus->self->class >> 8) { |
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index f8ca7becacca..7591fa4e28bb 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c | |||
@@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) | |||
766 | bfad->pcidev = pdev; | 766 | bfad->pcidev = pdev; |
767 | 767 | ||
768 | /* Adjust PCIe Maximum Read Request Size */ | 768 | /* Adjust PCIe Maximum Read Request Size */ |
769 | if (pcie_max_read_reqsz > 0) { | 769 | if (pci_is_pcie(pdev) && pcie_max_read_reqsz) { |
770 | int pcie_cap_reg; | 770 | if (pcie_max_read_reqsz >= 128 && |
771 | u16 pcie_dev_ctl; | 771 | pcie_max_read_reqsz <= 4096 && |
772 | u16 mask = 0xffff; | 772 | is_power_of_2(pcie_max_read_reqsz)) { |
773 | 773 | int max_rq = pcie_get_readrq(pdev); | |
774 | switch (pcie_max_read_reqsz) { | 774 | printk(KERN_WARNING "BFA[%s]: " |
775 | case 128: | ||
776 | mask = 0x0; | ||
777 | break; | ||
778 | case 256: | ||
779 | mask = 0x1000; | ||
780 | break; | ||
781 | case 512: | ||
782 | mask = 0x2000; | ||
783 | break; | ||
784 | case 1024: | ||
785 | mask = 0x3000; | ||
786 | break; | ||
787 | case 2048: | ||
788 | mask = 0x4000; | ||
789 | break; | ||
790 | case 4096: | ||
791 | mask = 0x5000; | ||
792 | break; | ||
793 | default: | ||
794 | break; | ||
795 | } | ||
796 | |||
797 | pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
798 | if (mask != 0xffff && pcie_cap_reg) { | ||
799 | pcie_cap_reg += 0x08; | ||
800 | pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); | ||
801 | if ((pcie_dev_ctl & 0x7000) != mask) { | ||
802 | printk(KERN_WARNING "BFA[%s]: " | ||
803 | "pcie_max_read_request_size is %d, " | 775 | "pcie_max_read_request_size is %d, " |
804 | "reset to %d\n", bfad->pci_name, | 776 | "reset to %d\n", bfad->pci_name, max_rq, |
805 | (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, | ||
806 | pcie_max_read_reqsz); | 777 | pcie_max_read_reqsz); |
807 | 778 | pcie_set_readrq(pdev, pcie_max_read_reqsz); | |
808 | pcie_dev_ctl &= ~0x7000; | 779 | } else { |
809 | pci_write_config_word(pdev, pcie_cap_reg, | 780 | printk(KERN_WARNING "BFA[%s]: invalid " |
810 | pcie_dev_ctl | mask); | 781 | "pcie_max_read_request_size %d ignored\n", |
811 | } | 782 | bfad->pci_name, pcie_max_read_reqsz); |
812 | } | 783 | } |
813 | } | 784 | } |
814 | 785 | ||
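The bfa hunk above swaps a hand-rolled PCI_EXP_DEVCTL read-modify-write for pcie_get_readrq()/pcie_set_readrq(). A minimal sketch of the same validation and update, with an illustrative helper name:

#include <linux/log2.h>
#include <linux/pci.h>

static void demo_set_mrrs(struct pci_dev *pdev, int reqsz)
{
	if (!pci_is_pcie(pdev))
		return;
	if (reqsz < 128 || reqsz > 4096 || !is_power_of_2(reqsz))
		return;				/* spec allows 128..4096, power of two */
	if (pcie_get_readrq(pdev) != reqsz)
		pcie_set_readrq(pdev, reqsz);	/* updates PCI_EXP_DEVCTL_READRQ */
}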
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 0eb35b9b3784..0eaec4748957 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
@@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw) | |||
852 | return 0; | 852 | return 0; |
853 | } | 853 | } |
854 | 854 | ||
855 | static void | ||
856 | csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) | ||
857 | { | ||
858 | uint16_t val; | ||
859 | int pcie_cap; | ||
860 | |||
861 | if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { | ||
862 | pci_read_config_word(hw->pdev, | ||
863 | pcie_cap + PCI_EXP_DEVCTL2, &val); | ||
864 | val &= 0xfff0; | ||
865 | val |= range ; | ||
866 | pci_write_config_word(hw->pdev, | ||
867 | pcie_cap + PCI_EXP_DEVCTL2, val); | ||
868 | } | ||
869 | } | ||
870 | |||
871 | /*****************************************************************************/ | 855 | /*****************************************************************************/ |
872 | /* HW State machine assists */ | 856 | /* HW State machine assists */ |
873 | /*****************************************************************************/ | 857 | /*****************************************************************************/ |
@@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw) | |||
2069 | goto out; | 2053 | goto out; |
2070 | } | 2054 | } |
2071 | 2055 | ||
2072 | /* Set pci completion timeout value to 4 seconds. */ | 2056 | /* Set PCIe completion timeout to 4 seconds */ |
2073 | csio_set_pcie_completion_timeout(hw, 0xd); | 2057 | if (pci_is_pcie(hw->pdev)) |
2058 | pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, | ||
2059 | PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); | ||
2074 | 2060 | ||
2075 | hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); | 2061 | hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); |
2076 | 2062 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 62ee7131b204..30d20e74e48a 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c | |||
@@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha) | |||
507 | pci_write_config_word(ha->pdev, PCI_COMMAND, w); | 507 | pci_write_config_word(ha->pdev, PCI_COMMAND, w); |
508 | 508 | ||
509 | /* PCIe -- adjust Maximum Read Request Size (2048). */ | 509 | /* PCIe -- adjust Maximum Read Request Size (2048). */ |
510 | if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) | 510 | if (pci_is_pcie(ha->pdev)) |
511 | pcie_set_readrq(ha->pdev, 2048); | 511 | pcie_set_readrq(ha->pdev, 2048); |
512 | 512 | ||
513 | ha->chip_revision = ha->pdev->revision; | 513 | ha->chip_revision = ha->pdev->revision; |
@@ -660,10 +660,8 @@ char * | |||
660 | qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str) | 660 | qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str) |
661 | { | 661 | { |
662 | struct qla_hw_data *ha = vha->hw; | 662 | struct qla_hw_data *ha = vha->hw; |
663 | int pcie_reg; | ||
664 | 663 | ||
665 | pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); | 664 | if (pci_is_pcie(ha->pdev)) { |
666 | if (pcie_reg) { | ||
667 | strcpy(str, "PCIe iSA"); | 665 | strcpy(str, "PCIe iSA"); |
668 | return str; | 666 | return str; |
669 | } | 667 | } |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 9f01bbbf3a26..bcd57f699ebb 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -494,18 +494,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) | |||
494 | static char *pci_bus_modes[] = { "33", "66", "100", "133", }; | 494 | static char *pci_bus_modes[] = { "33", "66", "100", "133", }; |
495 | struct qla_hw_data *ha = vha->hw; | 495 | struct qla_hw_data *ha = vha->hw; |
496 | uint32_t pci_bus; | 496 | uint32_t pci_bus; |
497 | int pcie_reg; | ||
498 | 497 | ||
499 | pcie_reg = pci_pcie_cap(ha->pdev); | 498 | if (pci_is_pcie(ha->pdev)) { |
500 | if (pcie_reg) { | ||
501 | char lwstr[6]; | 499 | char lwstr[6]; |
502 | uint16_t pcie_lstat, lspeed, lwidth; | 500 | uint32_t lstat, lspeed, lwidth; |
503 | 501 | ||
504 | pcie_reg += PCI_EXP_LNKCAP; | 502 | pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); |
505 | pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); | 503 | lspeed = lstat & PCI_EXP_LNKCAP_SLS; |
506 | lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); | 504 | lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; |
507 | lwidth = (pcie_lstat & | ||
508 | (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; | ||
509 | 505 | ||
510 | strcpy(str, "PCIe ("); | 506 | strcpy(str, "PCIe ("); |
511 | switch (lspeed) { | 507 | switch (lspeed) { |
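The qla2xxx change above reads the Link Capabilities register through pcie_capability_read_dword() and decodes it with the PCI_EXP_LNKCAP_* masks rather than raw BIT_* numbers. A short sketch of that access pattern, with an invented reporting function:

#include <linux/pci.h>

static void demo_report_link(struct pci_dev *pdev)
{
	u32 lnkcap;

	if (!pci_is_pcie(pdev))
		return;
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);
	dev_info(&pdev->dev, "link speed field %u, width x%u\n",
		 lnkcap & PCI_EXP_LNKCAP_SLS,
		 (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
}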
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c index 820a332f3188..ab8b29d2cb26 100644 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c | |||
@@ -3601,17 +3601,10 @@ static int et131x_pci_init(struct et131x_adapter *adapter, | |||
3601 | goto err_out; | 3601 | goto err_out; |
3602 | } | 3602 | } |
3603 | 3603 | ||
3604 | /* Let's set up the PORT LOGIC Register. First we need to know what | 3604 | /* Let's set up the PORT LOGIC Register. */ |
3605 | * the max_payload_size is | ||
3606 | */ | ||
3607 | if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) { | ||
3608 | dev_err(&pdev->dev, | ||
3609 | "Could not read PCI config space for Max Payload Size\n"); | ||
3610 | goto err_out; | ||
3611 | } | ||
3612 | 3605 | ||
3613 | /* Program the Ack/Nak latency and replay timers */ | 3606 | /* Program the Ack/Nak latency and replay timers */ |
3614 | max_payload &= 0x07; | 3607 | max_payload = pdev->pcie_mpss; |
3615 | 3608 | ||
3616 | if (max_payload < 2) { | 3609 | if (max_payload < 2) { |
3617 | static const u16 acknak[2] = { 0x76, 0xD0 }; | 3610 | static const u16 acknak[2] = { 0x76, 0xD0 }; |
@@ -3641,8 +3634,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter, | |||
3641 | } | 3634 | } |
3642 | 3635 | ||
3643 | /* Change the max read size to 2k */ | 3636 | /* Change the max read size to 2k */ |
3644 | if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, | 3637 | if (pcie_set_readrq(pdev, 2048)) { |
3645 | PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) { | ||
3646 | dev_err(&pdev->dev, | 3638 | dev_err(&pdev->dev, |
3647 | "Couldn't change PCI config space for Max read size\n"); | 3639 | "Couldn't change PCI config space for Max read size\n"); |
3648 | goto err_out; | 3640 | goto err_out; |