Diffstat (limited to 'drivers/pci')
31 files changed, 899 insertions, 688 deletions
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 529d9d7727b0..999cc4088b59 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -151,6 +151,13 @@ void pci_bus_add_devices(struct pci_bus *bus) | |||
151 | if (retval) | 151 | if (retval) |
152 | dev_err(&dev->dev, "Error creating cpuaffinity" | 152 | dev_err(&dev->dev, "Error creating cpuaffinity" |
153 | " file, continuing...\n"); | 153 | " file, continuing...\n"); |
154 | |||
155 | retval = device_create_file(&child_bus->dev, | ||
156 | &dev_attr_cpulistaffinity); | ||
157 | if (retval) | ||
158 | dev_err(&dev->dev, | ||
159 | "Error creating cpulistaffinity" | ||
160 | " file, continuing...\n"); | ||
154 | } | 161 | } |
155 | } | 162 | } |
156 | } | 163 | } |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 7b3751136e63..691b3adeb870 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -211,7 +211,7 @@ static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) | |||
211 | include_all = 1; | 211 | include_all = 1; |
212 | } | 212 | } |
213 | 213 | ||
214 | if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) { | 214 | if (ret) { |
215 | list_del(&dmaru->list); | 215 | list_del(&dmaru->list); |
216 | kfree(dmaru); | 216 | kfree(dmaru); |
217 | } | 217 | } |
@@ -289,6 +289,24 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | |||
289 | } | 289 | } |
290 | } | 290 | } |
291 | 291 | ||
292 | /** | ||
293 | * dmar_table_detect - checks to see if the platform supports DMAR devices | ||
294 | */ | ||
295 | static int __init dmar_table_detect(void) | ||
296 | { | ||
297 | acpi_status status = AE_OK; | ||
298 | |||
299 | /* if we could find DMAR table, then there are DMAR devices */ | ||
300 | status = acpi_get_table(ACPI_SIG_DMAR, 0, | ||
301 | (struct acpi_table_header **)&dmar_tbl); | ||
302 | |||
303 | if (ACPI_SUCCESS(status) && !dmar_tbl) { | ||
304 | printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); | ||
305 | status = AE_NOT_FOUND; | ||
306 | } | ||
307 | |||
308 | return (ACPI_SUCCESS(status) ? 1 : 0); | ||
309 | } | ||
292 | 310 | ||
293 | /** | 311 | /** |
294 | * parse_dmar_table - parses the DMA reporting table | 312 | * parse_dmar_table - parses the DMA reporting table |
@@ -300,6 +318,12 @@ parse_dmar_table(void) | |||
300 | struct acpi_dmar_header *entry_header; | 318 | struct acpi_dmar_header *entry_header; |
301 | int ret = 0; | 319 | int ret = 0; |
302 | 320 | ||
321 | /* | ||
322 | * Do it again; the earlier dmar_tbl mapping could have been | ||
323 | * done with the fixed map. | ||
324 | */ | ||
325 | dmar_table_detect(); | ||
326 | |||
303 | dmar = (struct acpi_table_dmar *)dmar_tbl; | 327 | dmar = (struct acpi_table_dmar *)dmar_tbl; |
304 | if (!dmar) | 328 | if (!dmar) |
305 | return -ENODEV; | 329 | return -ENODEV; |
@@ -373,10 +397,10 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev) | |||
373 | 397 | ||
374 | int __init dmar_dev_scope_init(void) | 398 | int __init dmar_dev_scope_init(void) |
375 | { | 399 | { |
376 | struct dmar_drhd_unit *drhd; | 400 | struct dmar_drhd_unit *drhd, *drhd_n; |
377 | int ret = -ENODEV; | 401 | int ret = -ENODEV; |
378 | 402 | ||
379 | for_each_drhd_unit(drhd) { | 403 | list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { |
380 | ret = dmar_parse_dev(drhd); | 404 | ret = dmar_parse_dev(drhd); |
381 | if (ret) | 405 | if (ret) |
382 | return ret; | 406 | return ret; |
@@ -384,8 +408,8 @@ int __init dmar_dev_scope_init(void) | |||
384 | 408 | ||
385 | #ifdef CONFIG_DMAR | 409 | #ifdef CONFIG_DMAR |
386 | { | 410 | { |
387 | struct dmar_rmrr_unit *rmrr; | 411 | struct dmar_rmrr_unit *rmrr, *rmrr_n; |
388 | for_each_rmrr_units(rmrr) { | 412 | list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { |
389 | ret = rmrr_parse_dev(rmrr); | 413 | ret = rmrr_parse_dev(rmrr); |
390 | if (ret) | 414 | if (ret) |
391 | return ret; | 415 | return ret; |
@@ -430,30 +454,11 @@ int __init dmar_table_init(void) | |||
430 | return 0; | 454 | return 0; |
431 | } | 455 | } |
432 | 456 | ||
433 | /** | ||
434 | * early_dmar_detect - checks to see if the platform supports DMAR devices | ||
435 | */ | ||
436 | int __init early_dmar_detect(void) | ||
437 | { | ||
438 | acpi_status status = AE_OK; | ||
439 | |||
440 | /* if we could find DMAR table, then there are DMAR devices */ | ||
441 | status = acpi_get_table(ACPI_SIG_DMAR, 0, | ||
442 | (struct acpi_table_header **)&dmar_tbl); | ||
443 | |||
444 | if (ACPI_SUCCESS(status) && !dmar_tbl) { | ||
445 | printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); | ||
446 | status = AE_NOT_FOUND; | ||
447 | } | ||
448 | |||
449 | return (ACPI_SUCCESS(status) ? 1 : 0); | ||
450 | } | ||
451 | |||
452 | void __init detect_intel_iommu(void) | 457 | void __init detect_intel_iommu(void) |
453 | { | 458 | { |
454 | int ret; | 459 | int ret; |
455 | 460 | ||
456 | ret = early_dmar_detect(); | 461 | ret = dmar_table_detect(); |
457 | 462 | ||
458 | { | 463 | { |
459 | #ifdef CONFIG_INTR_REMAP | 464 | #ifdef CONFIG_INTR_REMAP |
@@ -470,13 +475,13 @@ void __init detect_intel_iommu(void) | |||
470 | "Queued invalidation will be enabled to support " | 475 | "Queued invalidation will be enabled to support " |
471 | "x2apic and Intr-remapping.\n"); | 476 | "x2apic and Intr-remapping.\n"); |
472 | #endif | 477 | #endif |
473 | |||
474 | #ifdef CONFIG_DMAR | 478 | #ifdef CONFIG_DMAR |
475 | if (ret && !no_iommu && !iommu_detected && !swiotlb && | 479 | if (ret && !no_iommu && !iommu_detected && !swiotlb && |
476 | !dmar_disabled) | 480 | !dmar_disabled) |
477 | iommu_detected = 1; | 481 | iommu_detected = 1; |
478 | #endif | 482 | #endif |
479 | } | 483 | } |
484 | dmar_tbl = NULL; | ||
480 | } | 485 | } |
481 | 486 | ||
482 | 487 | ||
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 7d27631e6e62..8cfd1c4926c8 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -123,10 +123,8 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void) | |||
123 | static void __init print_bus_info (void) | 123 | static void __init print_bus_info (void) |
124 | { | 124 | { |
125 | struct bus_info *ptr; | 125 | struct bus_info *ptr; |
126 | struct list_head *ptr1; | ||
127 | 126 | ||
128 | list_for_each (ptr1, &bus_info_head) { | 127 | list_for_each_entry(ptr, &bus_info_head, bus_info_list) { |
129 | ptr = list_entry (ptr1, struct bus_info, bus_info_list); | ||
130 | debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); | 128 | debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); |
131 | debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); | 129 | debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); |
132 | debug ("%s - slot_count = %x\n", __func__, ptr->slot_count); | 130 | debug ("%s - slot_count = %x\n", __func__, ptr->slot_count); |
@@ -146,10 +144,8 @@ static void __init print_bus_info (void) | |||
146 | static void print_lo_info (void) | 144 | static void print_lo_info (void) |
147 | { | 145 | { |
148 | struct rio_detail *ptr; | 146 | struct rio_detail *ptr; |
149 | struct list_head *ptr1; | ||
150 | debug ("print_lo_info ----\n"); | 147 | debug ("print_lo_info ----\n"); |
151 | list_for_each (ptr1, &rio_lo_head) { | 148 | list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { |
152 | ptr = list_entry (ptr1, struct rio_detail, rio_detail_list); | ||
153 | debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); | 149 | debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); |
154 | debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); | 150 | debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); |
155 | debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); | 151 | debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); |
@@ -163,10 +159,8 @@ static void print_lo_info (void) | |||
163 | static void print_vg_info (void) | 159 | static void print_vg_info (void) |
164 | { | 160 | { |
165 | struct rio_detail *ptr; | 161 | struct rio_detail *ptr; |
166 | struct list_head *ptr1; | ||
167 | debug ("%s ---\n", __func__); | 162 | debug ("%s ---\n", __func__); |
168 | list_for_each (ptr1, &rio_vg_head) { | 163 | list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) { |
169 | ptr = list_entry (ptr1, struct rio_detail, rio_detail_list); | ||
170 | debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); | 164 | debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); |
171 | debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); | 165 | debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); |
172 | debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); | 166 | debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); |
@@ -180,10 +174,8 @@ static void print_vg_info (void) | |||
180 | static void __init print_ebda_pci_rsrc (void) | 174 | static void __init print_ebda_pci_rsrc (void) |
181 | { | 175 | { |
182 | struct ebda_pci_rsrc *ptr; | 176 | struct ebda_pci_rsrc *ptr; |
183 | struct list_head *ptr1; | ||
184 | 177 | ||
185 | list_for_each (ptr1, &ibmphp_ebda_pci_rsrc_head) { | 178 | list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { |
186 | ptr = list_entry (ptr1, struct ebda_pci_rsrc, ebda_pci_rsrc_list); | ||
187 | debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", | 179 | debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", |
188 | __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); | 180 | __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); |
189 | } | 181 | } |
@@ -192,10 +184,8 @@ static void __init print_ebda_pci_rsrc (void) | |||
192 | static void __init print_ibm_slot (void) | 184 | static void __init print_ibm_slot (void) |
193 | { | 185 | { |
194 | struct slot *ptr; | 186 | struct slot *ptr; |
195 | struct list_head *ptr1; | ||
196 | 187 | ||
197 | list_for_each (ptr1, &ibmphp_slot_head) { | 188 | list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) { |
198 | ptr = list_entry (ptr1, struct slot, ibm_slot_list); | ||
199 | debug ("%s - slot_number: %x\n", __func__, ptr->number); | 189 | debug ("%s - slot_number: %x\n", __func__, ptr->number); |
200 | } | 190 | } |
201 | } | 191 | } |
@@ -203,10 +193,8 @@ static void __init print_ibm_slot (void) | |||
203 | static void __init print_opt_vg (void) | 193 | static void __init print_opt_vg (void) |
204 | { | 194 | { |
205 | struct opt_rio *ptr; | 195 | struct opt_rio *ptr; |
206 | struct list_head *ptr1; | ||
207 | debug ("%s ---\n", __func__); | 196 | debug ("%s ---\n", __func__); |
208 | list_for_each (ptr1, &opt_vg_head) { | 197 | list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { |
209 | ptr = list_entry (ptr1, struct opt_rio, opt_rio_list); | ||
210 | debug ("%s - rio_type %x\n", __func__, ptr->rio_type); | 198 | debug ("%s - rio_type %x\n", __func__, ptr->rio_type); |
211 | debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num); | 199 | debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num); |
212 | debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num); | 200 | debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num); |
@@ -217,13 +205,9 @@ static void __init print_opt_vg (void) | |||
217 | static void __init print_ebda_hpc (void) | 205 | static void __init print_ebda_hpc (void) |
218 | { | 206 | { |
219 | struct controller *hpc_ptr; | 207 | struct controller *hpc_ptr; |
220 | struct list_head *ptr1; | ||
221 | u16 index; | 208 | u16 index; |
222 | 209 | ||
223 | list_for_each (ptr1, &ebda_hpc_head) { | 210 | list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) { |
224 | |||
225 | hpc_ptr = list_entry (ptr1, struct controller, ebda_hpc_list); | ||
226 | |||
227 | for (index = 0; index < hpc_ptr->slot_count; index++) { | 211 | for (index = 0; index < hpc_ptr->slot_count; index++) { |
228 | debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); | 212 | debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); |
229 | debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); | 213 | debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); |
@@ -460,9 +444,7 @@ static int __init ebda_rio_table (void) | |||
460 | static struct opt_rio *search_opt_vg (u8 chassis_num) | 444 | static struct opt_rio *search_opt_vg (u8 chassis_num) |
461 | { | 445 | { |
462 | struct opt_rio *ptr; | 446 | struct opt_rio *ptr; |
463 | struct list_head *ptr1; | 447 | list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { |
464 | list_for_each (ptr1, &opt_vg_head) { | ||
465 | ptr = list_entry (ptr1, struct opt_rio, opt_rio_list); | ||
466 | if (ptr->chassis_num == chassis_num) | 448 | if (ptr->chassis_num == chassis_num) |
467 | return ptr; | 449 | return ptr; |
468 | } | 450 | } |
@@ -473,10 +455,8 @@ static int __init combine_wpg_for_chassis (void) | |||
473 | { | 455 | { |
474 | struct opt_rio *opt_rio_ptr = NULL; | 456 | struct opt_rio *opt_rio_ptr = NULL; |
475 | struct rio_detail *rio_detail_ptr = NULL; | 457 | struct rio_detail *rio_detail_ptr = NULL; |
476 | struct list_head *list_head_ptr = NULL; | ||
477 | 458 | ||
478 | list_for_each (list_head_ptr, &rio_vg_head) { | 459 | list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { |
479 | rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); | ||
480 | opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); | 460 | opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); |
481 | if (!opt_rio_ptr) { | 461 | if (!opt_rio_ptr) { |
482 | opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); | 462 | opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); |
@@ -497,14 +477,12 @@ static int __init combine_wpg_for_chassis (void) | |||
497 | } | 477 | } |
498 | 478 | ||
499 | /* | 479 | /* |
500 | * reorgnizing linked list of expansion box | 480 | * reorganizing linked list of expansion box |
501 | */ | 481 | */ |
502 | static struct opt_rio_lo *search_opt_lo (u8 chassis_num) | 482 | static struct opt_rio_lo *search_opt_lo (u8 chassis_num) |
503 | { | 483 | { |
504 | struct opt_rio_lo *ptr; | 484 | struct opt_rio_lo *ptr; |
505 | struct list_head *ptr1; | 485 | list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { |
506 | list_for_each (ptr1, &opt_lo_head) { | ||
507 | ptr = list_entry (ptr1, struct opt_rio_lo, opt_rio_lo_list); | ||
508 | if (ptr->chassis_num == chassis_num) | 486 | if (ptr->chassis_num == chassis_num) |
509 | return ptr; | 487 | return ptr; |
510 | } | 488 | } |
@@ -515,10 +493,8 @@ static int combine_wpg_for_expansion (void) | |||
515 | { | 493 | { |
516 | struct opt_rio_lo *opt_rio_lo_ptr = NULL; | 494 | struct opt_rio_lo *opt_rio_lo_ptr = NULL; |
517 | struct rio_detail *rio_detail_ptr = NULL; | 495 | struct rio_detail *rio_detail_ptr = NULL; |
518 | struct list_head *list_head_ptr = NULL; | ||
519 | 496 | ||
520 | list_for_each (list_head_ptr, &rio_lo_head) { | 497 | list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { |
521 | rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); | ||
522 | opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); | 498 | opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); |
523 | if (!opt_rio_lo_ptr) { | 499 | if (!opt_rio_lo_ptr) { |
524 | opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); | 500 | opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); |
@@ -550,20 +526,17 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) | |||
550 | { | 526 | { |
551 | struct opt_rio *opt_vg_ptr = NULL; | 527 | struct opt_rio *opt_vg_ptr = NULL; |
552 | struct opt_rio_lo *opt_lo_ptr = NULL; | 528 | struct opt_rio_lo *opt_lo_ptr = NULL; |
553 | struct list_head *ptr = NULL; | ||
554 | int rc = 0; | 529 | int rc = 0; |
555 | 530 | ||
556 | if (!var) { | 531 | if (!var) { |
557 | list_for_each (ptr, &opt_vg_head) { | 532 | list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { |
558 | opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list); | ||
559 | if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { | 533 | if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { |
560 | rc = -ENODEV; | 534 | rc = -ENODEV; |
561 | break; | 535 | break; |
562 | } | 536 | } |
563 | } | 537 | } |
564 | } else { | 538 | } else { |
565 | list_for_each (ptr, &opt_lo_head) { | 539 | list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { |
566 | opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list); | ||
567 | if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) { | 540 | if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) { |
568 | rc = -ENODEV; | 541 | rc = -ENODEV; |
569 | break; | 542 | break; |
@@ -576,10 +549,8 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) | |||
576 | static struct opt_rio_lo * find_rxe_num (u8 slot_num) | 549 | static struct opt_rio_lo * find_rxe_num (u8 slot_num) |
577 | { | 550 | { |
578 | struct opt_rio_lo *opt_lo_ptr; | 551 | struct opt_rio_lo *opt_lo_ptr; |
579 | struct list_head *ptr; | ||
580 | 552 | ||
581 | list_for_each (ptr, &opt_lo_head) { | 553 | list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { |
582 | opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list); | ||
583 | //check to see if this slot_num belongs to expansion box | 554 | //check to see if this slot_num belongs to expansion box |
584 | if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) | 555 | if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) |
585 | return opt_lo_ptr; | 556 | return opt_lo_ptr; |
@@ -590,10 +561,8 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num) | |||
590 | static struct opt_rio * find_chassis_num (u8 slot_num) | 561 | static struct opt_rio * find_chassis_num (u8 slot_num) |
591 | { | 562 | { |
592 | struct opt_rio *opt_vg_ptr; | 563 | struct opt_rio *opt_vg_ptr; |
593 | struct list_head *ptr; | ||
594 | 564 | ||
595 | list_for_each (ptr, &opt_vg_head) { | 565 | list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { |
596 | opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list); | ||
597 | //check to see if this slot_num belongs to chassis | 566 | //check to see if this slot_num belongs to chassis |
598 | if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) | 567 | if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) |
599 | return opt_vg_ptr; | 568 | return opt_vg_ptr; |
@@ -607,11 +576,9 @@ static struct opt_rio * find_chassis_num (u8 slot_num) | |||
607 | static u8 calculate_first_slot (u8 slot_num) | 576 | static u8 calculate_first_slot (u8 slot_num) |
608 | { | 577 | { |
609 | u8 first_slot = 1; | 578 | u8 first_slot = 1; |
610 | struct list_head * list; | ||
611 | struct slot * slot_cur; | 579 | struct slot * slot_cur; |
612 | 580 | ||
613 | list_for_each (list, &ibmphp_slot_head) { | 581 | list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { |
614 | slot_cur = list_entry (list, struct slot, ibm_slot_list); | ||
615 | if (slot_cur->ctrl) { | 582 | if (slot_cur->ctrl) { |
616 | if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) | 583 | if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) |
617 | first_slot = slot_cur->ctrl->ending_slot_num; | 584 | first_slot = slot_cur->ctrl->ending_slot_num; |
@@ -767,7 +734,6 @@ static int __init ebda_rsrc_controller (void) | |||
767 | struct bus_info *bus_info_ptr1, *bus_info_ptr2; | 734 | struct bus_info *bus_info_ptr1, *bus_info_ptr2; |
768 | int rc; | 735 | int rc; |
769 | struct slot *tmp_slot; | 736 | struct slot *tmp_slot; |
770 | struct list_head *list; | ||
771 | 737 | ||
772 | addr = hpc_list_ptr->phys_addr; | 738 | addr = hpc_list_ptr->phys_addr; |
773 | for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { | 739 | for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { |
@@ -997,9 +963,7 @@ static int __init ebda_rsrc_controller (void) | |||
997 | 963 | ||
998 | } /* each hpc */ | 964 | } /* each hpc */ |
999 | 965 | ||
1000 | list_for_each (list, &ibmphp_slot_head) { | 966 | list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) { |
1001 | tmp_slot = list_entry (list, struct slot, ibm_slot_list); | ||
1002 | |||
1003 | snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); | 967 | snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); |
1004 | pci_hp_register(tmp_slot->hotplug_slot, | 968 | pci_hp_register(tmp_slot->hotplug_slot, |
1005 | pci_find_bus(0, tmp_slot->bus), tmp_slot->device); | 969 | pci_find_bus(0, tmp_slot->bus), tmp_slot->device); |
@@ -1101,10 +1065,8 @@ u16 ibmphp_get_total_controllers (void) | |||
1101 | struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) | 1065 | struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) |
1102 | { | 1066 | { |
1103 | struct slot *slot; | 1067 | struct slot *slot; |
1104 | struct list_head *list; | ||
1105 | 1068 | ||
1106 | list_for_each (list, &ibmphp_slot_head) { | 1069 | list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) { |
1107 | slot = list_entry (list, struct slot, ibm_slot_list); | ||
1108 | if (slot->number == physical_num) | 1070 | if (slot->number == physical_num) |
1109 | return slot; | 1071 | return slot; |
1110 | } | 1072 | } |
@@ -1120,10 +1082,8 @@ struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) | |||
1120 | struct bus_info *ibmphp_find_same_bus_num (u32 num) | 1082 | struct bus_info *ibmphp_find_same_bus_num (u32 num) |
1121 | { | 1083 | { |
1122 | struct bus_info *ptr; | 1084 | struct bus_info *ptr; |
1123 | struct list_head *ptr1; | ||
1124 | 1085 | ||
1125 | list_for_each (ptr1, &bus_info_head) { | 1086 | list_for_each_entry(ptr, &bus_info_head, bus_info_list) { |
1126 | ptr = list_entry (ptr1, struct bus_info, bus_info_list); | ||
1127 | if (ptr->busno == num) | 1087 | if (ptr->busno == num) |
1128 | return ptr; | 1088 | return ptr; |
1129 | } | 1089 | } |
@@ -1136,10 +1096,8 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num) | |||
1136 | int ibmphp_get_bus_index (u8 num) | 1096 | int ibmphp_get_bus_index (u8 num) |
1137 | { | 1097 | { |
1138 | struct bus_info *ptr; | 1098 | struct bus_info *ptr; |
1139 | struct list_head *ptr1; | ||
1140 | 1099 | ||
1141 | list_for_each (ptr1, &bus_info_head) { | 1100 | list_for_each_entry(ptr, &bus_info_head, bus_info_list) { |
1142 | ptr = list_entry (ptr1, struct bus_info, bus_info_list); | ||
1143 | if (ptr->busno == num) | 1101 | if (ptr->busno == num) |
1144 | return ptr->index; | 1102 | return ptr->index; |
1145 | } | 1103 | } |
@@ -1212,11 +1170,9 @@ static struct pci_driver ibmphp_driver = { | |||
1212 | int ibmphp_register_pci (void) | 1170 | int ibmphp_register_pci (void) |
1213 | { | 1171 | { |
1214 | struct controller *ctrl; | 1172 | struct controller *ctrl; |
1215 | struct list_head *tmp; | ||
1216 | int rc = 0; | 1173 | int rc = 0; |
1217 | 1174 | ||
1218 | list_for_each (tmp, &ebda_hpc_head) { | 1175 | list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { |
1219 | ctrl = list_entry (tmp, struct controller, ebda_hpc_list); | ||
1220 | if (ctrl->ctlr_type == 1) { | 1176 | if (ctrl->ctlr_type == 1) { |
1221 | rc = pci_register_driver(&ibmphp_driver); | 1177 | rc = pci_register_driver(&ibmphp_driver); |
1222 | break; | 1178 | break; |
@@ -1227,12 +1183,10 @@ int ibmphp_register_pci (void) | |||
1227 | static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) | 1183 | static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) |
1228 | { | 1184 | { |
1229 | struct controller *ctrl; | 1185 | struct controller *ctrl; |
1230 | struct list_head *tmp; | ||
1231 | 1186 | ||
1232 | debug ("inside ibmphp_probe\n"); | 1187 | debug ("inside ibmphp_probe\n"); |
1233 | 1188 | ||
1234 | list_for_each (tmp, &ebda_hpc_head) { | 1189 | list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { |
1235 | ctrl = list_entry (tmp, struct controller, ebda_hpc_list); | ||
1236 | if (ctrl->ctlr_type == 1) { | 1190 | if (ctrl->ctlr_type == 1) { |
1237 | if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { | 1191 | if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { |
1238 | ctrl->ctrl_dev = dev; | 1192 | ctrl->ctrl_dev = dev; |
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 5f85b1b120e3..2e6c4474644e 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c | |||
@@ -102,13 +102,13 @@ static int get_##name (struct hotplug_slot *slot, type *value) \ | |||
102 | { \ | 102 | { \ |
103 | struct hotplug_slot_ops *ops = slot->ops; \ | 103 | struct hotplug_slot_ops *ops = slot->ops; \ |
104 | int retval = 0; \ | 104 | int retval = 0; \ |
105 | if (try_module_get(ops->owner)) { \ | 105 | if (!try_module_get(ops->owner)) \ |
106 | if (ops->get_##name) \ | 106 | return -ENODEV; \ |
107 | retval = ops->get_##name(slot, value); \ | 107 | if (ops->get_##name) \ |
108 | else \ | 108 | retval = ops->get_##name(slot, value); \ |
109 | *value = slot->info->name; \ | 109 | else \ |
110 | module_put(ops->owner); \ | 110 | *value = slot->info->name; \ |
111 | } \ | 111 | module_put(ops->owner); \ |
112 | return retval; \ | 112 | return retval; \ |
113 | } | 113 | } |
114 | 114 | ||
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 9e6cec67e1cc..c367978bd7fe 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -57,6 +57,19 @@ extern struct workqueue_struct *pciehp_wq; | |||
57 | #define warn(format, arg...) \ | 57 | #define warn(format, arg...) \ |
58 | printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) | 58 | printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) |
59 | 59 | ||
60 | #define ctrl_dbg(ctrl, format, arg...) \ | ||
61 | do { \ | ||
62 | if (pciehp_debug) \ | ||
63 | dev_printk(KERN_DEBUG, &ctrl->pcie->device, \ ||
64 | format, ## arg); \ | ||
65 | } while (0) | ||
66 | #define ctrl_err(ctrl, format, arg...) \ | ||
67 | dev_err(&ctrl->pcie->device, format, ## arg) | ||
68 | #define ctrl_info(ctrl, format, arg...) \ | ||
69 | dev_info(&ctrl->pcie->device, format, ## arg) | ||
70 | #define ctrl_warn(ctrl, format, arg...) \ | ||
71 | dev_warn(&ctrl->pcie->device, format, ## arg) | ||
72 | |||
60 | #define SLOT_NAME_SIZE 10 | 73 | #define SLOT_NAME_SIZE 10 |
61 | struct slot { | 74 | struct slot { |
62 | u8 bus; | 75 | u8 bus; |
@@ -87,6 +100,7 @@ struct controller { | |||
87 | int num_slots; /* Number of slots on ctlr */ | 100 | int num_slots; /* Number of slots on ctlr */ |
88 | int slot_num_inc; /* 1 or -1 */ | 101 | int slot_num_inc; /* 1 or -1 */ |
89 | struct pci_dev *pci_dev; | 102 | struct pci_dev *pci_dev; |
103 | struct pcie_device *pcie; /* PCI Express port service */ | ||
90 | struct list_head slot_list; | 104 | struct list_head slot_list; |
91 | struct hpc_ops *hpc_ops; | 105 | struct hpc_ops *hpc_ops; |
92 | wait_queue_head_t queue; /* sleep & wake process */ | 106 | wait_queue_head_t queue; /* sleep & wake process */ |
@@ -170,7 +184,7 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) | |||
170 | return slot; | 184 | return slot; |
171 | } | 185 | } |
172 | 186 | ||
173 | err("%s: slot (device=0x%x) not found\n", __func__, device); | 187 | ctrl_err(ctrl, "%s: slot (device=0x%x) not found\n", __func__, device); |
174 | return NULL; | 188 | return NULL; |
175 | } | 189 | } |
176 | 190 | ||
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 4fd5355bc3b5..c748a19db89d 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -144,9 +144,10 @@ set_lock_exit: | |||
144 | * sysfs interface which allows the user to toggle the Electro Mechanical | 144 | * sysfs interface which allows the user to toggle the Electro Mechanical |
145 | * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock | 145 | * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock |
146 | */ | 146 | */ |
147 | static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, | 147 | static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot, |
148 | size_t count) | 148 | const char *buf, size_t count) |
149 | { | 149 | { |
150 | struct slot *slot = hotplug_slot->private; | ||
150 | unsigned long llock; | 151 | unsigned long llock; |
151 | u8 lock; | 152 | u8 lock; |
152 | int retval = 0; | 153 | int retval = 0; |
@@ -157,10 +158,11 @@ static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, | |||
157 | switch (lock) { | 158 | switch (lock) { |
158 | case 0: | 159 | case 0: |
159 | case 1: | 160 | case 1: |
160 | retval = set_lock_status(slot, lock); | 161 | retval = set_lock_status(hotplug_slot, lock); |
161 | break; | 162 | break; |
162 | default: | 163 | default: |
163 | err ("%d is an invalid lock value\n", lock); | 164 | ctrl_err(slot->ctrl, "%d is an invalid lock value\n", |
165 | lock); | ||
164 | retval = -EINVAL; | 166 | retval = -EINVAL; |
165 | } | 167 | } |
166 | if (retval) | 168 | if (retval) |
@@ -180,7 +182,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = { | |||
180 | */ | 182 | */ |
181 | static void release_slot(struct hotplug_slot *hotplug_slot) | 183 | static void release_slot(struct hotplug_slot *hotplug_slot) |
182 | { | 184 | { |
183 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 185 | struct slot *slot = hotplug_slot->private; |
186 | |||
187 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", | ||
188 | __func__, hotplug_slot->name); | ||
184 | 189 | ||
185 | kfree(hotplug_slot->info); | 190 | kfree(hotplug_slot->info); |
186 | kfree(hotplug_slot); | 191 | kfree(hotplug_slot); |
@@ -215,9 +220,9 @@ static int init_slots(struct controller *ctrl) | |||
215 | get_adapter_status(hotplug_slot, &info->adapter_status); | 220 | get_adapter_status(hotplug_slot, &info->adapter_status); |
216 | slot->hotplug_slot = hotplug_slot; | 221 | slot->hotplug_slot = hotplug_slot; |
217 | 222 | ||
218 | dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " | 223 | ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x " |
219 | "slot_device_offset=%x\n", slot->bus, slot->device, | 224 | "slot_device_offset=%x\n", slot->bus, slot->device, |
220 | slot->hp_slot, slot->number, ctrl->slot_device_offset); | 225 | slot->hp_slot, slot->number, ctrl->slot_device_offset); |
221 | duplicate_name: | 226 | duplicate_name: |
222 | retval = pci_hp_register(hotplug_slot, | 227 | retval = pci_hp_register(hotplug_slot, |
223 | ctrl->pci_dev->subordinate, | 228 | ctrl->pci_dev->subordinate, |
@@ -233,9 +238,11 @@ duplicate_name: | |||
233 | if (len < SLOT_NAME_SIZE) | 238 | if (len < SLOT_NAME_SIZE) |
234 | goto duplicate_name; | 239 | goto duplicate_name; |
235 | else | 240 | else |
236 | err("duplicate slot name overflow\n"); | 241 | ctrl_err(ctrl, "duplicate slot name " |
242 | "overflow\n"); | ||
237 | } | 243 | } |
238 | err("pci_hp_register failed with error %d\n", retval); | 244 | ctrl_err(ctrl, "pci_hp_register failed with error %d\n", |
245 | retval); | ||
239 | goto error_info; | 246 | goto error_info; |
240 | } | 247 | } |
241 | /* create additional sysfs entries */ | 248 | /* create additional sysfs entries */ |
@@ -244,7 +251,8 @@ duplicate_name: | |||
244 | &hotplug_slot_attr_lock.attr); | 251 | &hotplug_slot_attr_lock.attr); |
245 | if (retval) { | 252 | if (retval) { |
246 | pci_hp_deregister(hotplug_slot); | 253 | pci_hp_deregister(hotplug_slot); |
247 | err("cannot create additional sysfs entries\n"); | 254 | ctrl_err(ctrl, "cannot create additional sysfs " |
255 | "entries\n"); | ||
248 | goto error_info; | 256 | goto error_info; |
249 | } | 257 | } |
250 | } | 258 | } |
@@ -278,7 +286,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) | |||
278 | { | 286 | { |
279 | struct slot *slot = hotplug_slot->private; | 287 | struct slot *slot = hotplug_slot->private; |
280 | 288 | ||
281 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 289 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
290 | __func__, hotplug_slot->name); | ||
282 | 291 | ||
283 | hotplug_slot->info->attention_status = status; | 292 | hotplug_slot->info->attention_status = status; |
284 | 293 | ||
@@ -293,7 +302,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
293 | { | 302 | { |
294 | struct slot *slot = hotplug_slot->private; | 303 | struct slot *slot = hotplug_slot->private; |
295 | 304 | ||
296 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 305 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
306 | __func__, hotplug_slot->name); | ||
297 | 307 | ||
298 | return pciehp_sysfs_enable_slot(slot); | 308 | return pciehp_sysfs_enable_slot(slot); |
299 | } | 309 | } |
@@ -303,7 +313,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
303 | { | 313 | { |
304 | struct slot *slot = hotplug_slot->private; | 314 | struct slot *slot = hotplug_slot->private; |
305 | 315 | ||
306 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 316 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
317 | __func__, hotplug_slot->name); | ||
307 | 318 | ||
308 | return pciehp_sysfs_disable_slot(slot); | 319 | return pciehp_sysfs_disable_slot(slot); |
309 | } | 320 | } |
@@ -313,7 +324,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
313 | struct slot *slot = hotplug_slot->private; | 324 | struct slot *slot = hotplug_slot->private; |
314 | int retval; | 325 | int retval; |
315 | 326 | ||
316 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 327 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
328 | __func__, hotplug_slot->name); | ||
317 | 329 | ||
318 | retval = slot->hpc_ops->get_power_status(slot, value); | 330 | retval = slot->hpc_ops->get_power_status(slot, value); |
319 | if (retval < 0) | 331 | if (retval < 0) |
@@ -327,7 +339,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
327 | struct slot *slot = hotplug_slot->private; | 339 | struct slot *slot = hotplug_slot->private; |
328 | int retval; | 340 | int retval; |
329 | 341 | ||
330 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 342 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
343 | __func__, hotplug_slot->name); | ||
331 | 344 | ||
332 | retval = slot->hpc_ops->get_attention_status(slot, value); | 345 | retval = slot->hpc_ops->get_attention_status(slot, value); |
333 | if (retval < 0) | 346 | if (retval < 0) |
@@ -341,7 +354,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
341 | struct slot *slot = hotplug_slot->private; | 354 | struct slot *slot = hotplug_slot->private; |
342 | int retval; | 355 | int retval; |
343 | 356 | ||
344 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 357 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
358 | __func__, hotplug_slot->name); | ||
345 | 359 | ||
346 | retval = slot->hpc_ops->get_latch_status(slot, value); | 360 | retval = slot->hpc_ops->get_latch_status(slot, value); |
347 | if (retval < 0) | 361 | if (retval < 0) |
@@ -355,7 +369,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
355 | struct slot *slot = hotplug_slot->private; | 369 | struct slot *slot = hotplug_slot->private; |
356 | int retval; | 370 | int retval; |
357 | 371 | ||
358 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 372 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
373 | __func__, hotplug_slot->name); | ||
359 | 374 | ||
360 | retval = slot->hpc_ops->get_adapter_status(slot, value); | 375 | retval = slot->hpc_ops->get_adapter_status(slot, value); |
361 | if (retval < 0) | 376 | if (retval < 0) |
@@ -370,7 +385,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | |||
370 | struct slot *slot = hotplug_slot->private; | 385 | struct slot *slot = hotplug_slot->private; |
371 | int retval; | 386 | int retval; |
372 | 387 | ||
373 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 388 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
389 | __func__, hotplug_slot->name); | ||
374 | 390 | ||
375 | retval = slot->hpc_ops->get_max_bus_speed(slot, value); | 391 | retval = slot->hpc_ops->get_max_bus_speed(slot, value); |
376 | if (retval < 0) | 392 | if (retval < 0) |
@@ -384,7 +400,8 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe | |||
384 | struct slot *slot = hotplug_slot->private; | 400 | struct slot *slot = hotplug_slot->private; |
385 | int retval; | 401 | int retval; |
386 | 402 | ||
387 | dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); | 403 | ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", |
404 | __func__, hotplug_slot->name); | ||
388 | 405 | ||
389 | retval = slot->hpc_ops->get_cur_bus_speed(slot, value); | 406 | retval = slot->hpc_ops->get_cur_bus_speed(slot, value); |
390 | if (retval < 0) | 407 | if (retval < 0) |
@@ -402,14 +419,15 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ | |||
402 | struct pci_dev *pdev = dev->port; | 419 | struct pci_dev *pdev = dev->port; |
403 | 420 | ||
404 | if (pciehp_force) | 421 | if (pciehp_force) |
405 | dbg("Bypassing BIOS check for pciehp use on %s\n", | 422 | dev_info(&dev->device, |
406 | pci_name(pdev)); | 423 | "Bypassing BIOS check for pciehp use on %s\n", |
424 | pci_name(pdev)); | ||
407 | else if (pciehp_get_hp_hw_control_from_firmware(pdev)) | 425 | else if (pciehp_get_hp_hw_control_from_firmware(pdev)) |
408 | goto err_out_none; | 426 | goto err_out_none; |
409 | 427 | ||
410 | ctrl = pcie_init(dev); | 428 | ctrl = pcie_init(dev); |
411 | if (!ctrl) { | 429 | if (!ctrl) { |
412 | dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); | 430 | dev_err(&dev->device, "controller initialization failed\n"); |
413 | goto err_out_none; | 431 | goto err_out_none; |
414 | } | 432 | } |
415 | set_service_data(dev, ctrl); | 433 | set_service_data(dev, ctrl); |
@@ -418,11 +436,10 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ | |||
418 | rc = init_slots(ctrl); | 436 | rc = init_slots(ctrl); |
419 | if (rc) { | 437 | if (rc) { |
420 | if (rc == -EBUSY) | 438 | if (rc == -EBUSY) |
421 | warn("%s: slot already registered by another " | 439 | ctrl_warn(ctrl, "slot already registered by another " |
422 | "hotplug driver\n", PCIE_MODULE_NAME); | 440 | "hotplug driver\n"); |
423 | else | 441 | else |
424 | err("%s: slot initialization failed\n", | 442 | ctrl_err(ctrl, "slot initialization failed\n"); |
425 | PCIE_MODULE_NAME); | ||
426 | goto err_out_release_ctlr; | 443 | goto err_out_release_ctlr; |
427 | } | 444 | } |
428 | 445 | ||
@@ -461,13 +478,13 @@ static void pciehp_remove (struct pcie_device *dev) | |||
461 | #ifdef CONFIG_PM | 478 | #ifdef CONFIG_PM |
462 | static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) | 479 | static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) |
463 | { | 480 | { |
464 | printk("%s ENTRY\n", __func__); | 481 | dev_info(&dev->device, "%s ENTRY\n", __func__); |
465 | return 0; | 482 | return 0; |
466 | } | 483 | } |
467 | 484 | ||
468 | static int pciehp_resume (struct pcie_device *dev) | 485 | static int pciehp_resume (struct pcie_device *dev) |
469 | { | 486 | { |
470 | printk("%s ENTRY\n", __func__); | 487 | dev_info(&dev->device, "%s ENTRY\n", __func__); |
471 | if (pciehp_force) { | 488 | if (pciehp_force) { |
472 | struct controller *ctrl = get_service_data(dev); | 489 | struct controller *ctrl = get_service_data(dev); |
473 | struct slot *t_slot; | 490 | struct slot *t_slot; |
@@ -497,10 +514,9 @@ static struct pcie_port_service_id port_pci_ids[] = { { | |||
497 | .driver_data = 0, | 514 | .driver_data = 0, |
498 | }, { /* end: all zeroes */ } | 515 | }, { /* end: all zeroes */ } |
499 | }; | 516 | }; |
500 | static const char device_name[] = "hpdriver"; | ||
501 | 517 | ||
502 | static struct pcie_port_service_driver hpdriver_portdrv = { | 518 | static struct pcie_port_service_driver hpdriver_portdrv = { |
503 | .name = (char *)device_name, | 519 | .name = PCIE_MODULE_NAME, |
504 | .id_table = &port_pci_ids[0], | 520 | .id_table = &port_pci_ids[0], |
505 | 521 | ||
506 | .probe = pciehp_probe, | 522 | .probe = pciehp_probe, |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 96a5d55a4983..acb7f9efd182 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -58,14 +58,15 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
58 | u8 pciehp_handle_attention_button(struct slot *p_slot) | 58 | u8 pciehp_handle_attention_button(struct slot *p_slot) |
59 | { | 59 | { |
60 | u32 event_type; | 60 | u32 event_type; |
61 | struct controller *ctrl = p_slot->ctrl; | ||
61 | 62 | ||
62 | /* Attention Button Change */ | 63 | /* Attention Button Change */ |
63 | dbg("pciehp: Attention button interrupt received.\n"); | 64 | ctrl_dbg(ctrl, "Attention button interrupt received.\n"); |
64 | 65 | ||
65 | /* | 66 | /* |
66 | * Button pressed - See if need to TAKE ACTION!!! | 67 | * Button pressed - See if need to TAKE ACTION!!! |
67 | */ | 68 | */ |
68 | info("Button pressed on Slot(%s)\n", p_slot->name); | 69 | ctrl_info(ctrl, "Button pressed on Slot(%s)\n", p_slot->name); |
69 | event_type = INT_BUTTON_PRESS; | 70 | event_type = INT_BUTTON_PRESS; |
70 | 71 | ||
71 | queue_interrupt_event(p_slot, event_type); | 72 | queue_interrupt_event(p_slot, event_type); |
@@ -77,22 +78,23 @@ u8 pciehp_handle_switch_change(struct slot *p_slot) | |||
77 | { | 78 | { |
78 | u8 getstatus; | 79 | u8 getstatus; |
79 | u32 event_type; | 80 | u32 event_type; |
81 | struct controller *ctrl = p_slot->ctrl; | ||
80 | 82 | ||
81 | /* Switch Change */ | 83 | /* Switch Change */ |
82 | dbg("pciehp: Switch interrupt received.\n"); | 84 | ctrl_dbg(ctrl, "Switch interrupt received.\n"); |
83 | 85 | ||
84 | p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 86 | p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); |
85 | if (getstatus) { | 87 | if (getstatus) { |
86 | /* | 88 | /* |
87 | * Switch opened | 89 | * Switch opened |
88 | */ | 90 | */ |
89 | info("Latch open on Slot(%s)\n", p_slot->name); | 91 | ctrl_info(ctrl, "Latch open on Slot(%s)\n", p_slot->name); |
90 | event_type = INT_SWITCH_OPEN; | 92 | event_type = INT_SWITCH_OPEN; |
91 | } else { | 93 | } else { |
92 | /* | 94 | /* |
93 | * Switch closed | 95 | * Switch closed |
94 | */ | 96 | */ |
95 | info("Latch close on Slot(%s)\n", p_slot->name); | 97 | ctrl_info(ctrl, "Latch close on Slot(%s)\n", p_slot->name); |
96 | event_type = INT_SWITCH_CLOSE; | 98 | event_type = INT_SWITCH_CLOSE; |
97 | } | 99 | } |
98 | 100 | ||
@@ -105,9 +107,10 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) | |||
105 | { | 107 | { |
106 | u32 event_type; | 108 | u32 event_type; |
107 | u8 presence_save; | 109 | u8 presence_save; |
110 | struct controller *ctrl = p_slot->ctrl; | ||
108 | 111 | ||
109 | /* Presence Change */ | 112 | /* Presence Change */ |
110 | dbg("pciehp: Presence/Notify input change.\n"); | 113 | ctrl_dbg(ctrl, "Presence/Notify input change.\n"); |
111 | 114 | ||
112 | /* Switch is open, assume a presence change | 115 | /* Switch is open, assume a presence change |
113 | * Save the presence state | 116 | * Save the presence state |
@@ -117,13 +120,13 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) | |||
117 | /* | 120 | /* |
118 | * Card Present | 121 | * Card Present |
119 | */ | 122 | */ |
120 | info("Card present on Slot(%s)\n", p_slot->name); | 123 | ctrl_info(ctrl, "Card present on Slot(%s)\n", p_slot->name); |
121 | event_type = INT_PRESENCE_ON; | 124 | event_type = INT_PRESENCE_ON; |
122 | } else { | 125 | } else { |
123 | /* | 126 | /* |
124 | * Not Present | 127 | * Not Present |
125 | */ | 128 | */ |
126 | info("Card not present on Slot(%s)\n", p_slot->name); | 129 | ctrl_info(ctrl, "Card not present on Slot(%s)\n", p_slot->name); |
127 | event_type = INT_PRESENCE_OFF; | 130 | event_type = INT_PRESENCE_OFF; |
128 | } | 131 | } |
129 | 132 | ||
@@ -135,23 +138,25 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) | |||
135 | u8 pciehp_handle_power_fault(struct slot *p_slot) | 138 | u8 pciehp_handle_power_fault(struct slot *p_slot) |
136 | { | 139 | { |
137 | u32 event_type; | 140 | u32 event_type; |
141 | struct controller *ctrl = p_slot->ctrl; | ||
138 | 142 | ||
139 | /* power fault */ | 143 | /* power fault */ |
140 | dbg("pciehp: Power fault interrupt received.\n"); | 144 | ctrl_dbg(ctrl, "Power fault interrupt received.\n"); |
141 | 145 | ||
142 | if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { | 146 | if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { |
143 | /* | 147 | /* |
144 | * power fault Cleared | 148 | * power fault Cleared |
145 | */ | 149 | */ |
146 | info("Power fault cleared on Slot(%s)\n", p_slot->name); | 150 | ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", |
151 | p_slot->name); | ||
147 | event_type = INT_POWER_FAULT_CLEAR; | 152 | event_type = INT_POWER_FAULT_CLEAR; |
148 | } else { | 153 | } else { |
149 | /* | 154 | /* |
150 | * power fault | 155 | * power fault |
151 | */ | 156 | */ |
152 | info("Power fault on Slot(%s)\n", p_slot->name); | 157 | ctrl_info(ctrl, "Power fault on Slot(%s)\n", p_slot->name); |
153 | event_type = INT_POWER_FAULT; | 158 | event_type = INT_POWER_FAULT; |
154 | info("power fault bit %x set\n", 0); | 159 | ctrl_info(ctrl, "power fault bit %x set\n", 0); |
155 | } | 160 | } |
156 | 161 | ||
157 | queue_interrupt_event(p_slot, event_type); | 162 | queue_interrupt_event(p_slot, event_type); |
@@ -168,8 +173,9 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) | |||
168 | /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ | 173 | /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ |
169 | if (POWER_CTRL(ctrl)) { | 174 | if (POWER_CTRL(ctrl)) { |
170 | if (pslot->hpc_ops->power_off_slot(pslot)) { | 175 | if (pslot->hpc_ops->power_off_slot(pslot)) { |
171 | err("%s: Issue of Slot Power Off command failed\n", | 176 | ctrl_err(ctrl, |
172 | __func__); | 177 | "%s: Issue of Slot Power Off command failed\n", |
178 | __func__); | ||
173 | return; | 179 | return; |
174 | } | 180 | } |
175 | } | 181 | } |
@@ -186,8 +192,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) | |||
186 | 192 | ||
187 | if (ATTN_LED(ctrl)) { | 193 | if (ATTN_LED(ctrl)) { |
188 | if (pslot->hpc_ops->set_attention_status(pslot, 1)) { | 194 | if (pslot->hpc_ops->set_attention_status(pslot, 1)) { |
189 | err("%s: Issue of Set Attention Led command failed\n", | 195 | ctrl_err(ctrl, "%s: Issue of Set Attention " |
190 | __func__); | 196 | "Led command failed\n", __func__); |
191 | return; | 197 | return; |
192 | } | 198 | } |
193 | } | 199 | } |
@@ -205,9 +211,9 @@ static int board_added(struct slot *p_slot) | |||
205 | int retval = 0; | 211 | int retval = 0; |
206 | struct controller *ctrl = p_slot->ctrl; | 212 | struct controller *ctrl = p_slot->ctrl; |
207 | 213 | ||
208 | dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n", | 214 | ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d ,%d\n", |
209 | __func__, p_slot->device, | 215 | __func__, p_slot->device, ctrl->slot_device_offset, |
210 | ctrl->slot_device_offset, p_slot->hp_slot); | 216 | p_slot->hp_slot); |
211 | 217 | ||
212 | if (POWER_CTRL(ctrl)) { | 218 | if (POWER_CTRL(ctrl)) { |
213 | /* Power on slot */ | 219 | /* Power on slot */ |
@@ -225,22 +231,22 @@ static int board_added(struct slot *p_slot) | |||
225 | /* Check link training status */ | 231 | /* Check link training status */ |
226 | retval = p_slot->hpc_ops->check_lnk_status(ctrl); | 232 | retval = p_slot->hpc_ops->check_lnk_status(ctrl); |
227 | if (retval) { | 233 | if (retval) { |
228 | err("%s: Failed to check link status\n", __func__); | 234 | ctrl_err(ctrl, "%s: Failed to check link status\n", __func__); |
229 | set_slot_off(ctrl, p_slot); | 235 | set_slot_off(ctrl, p_slot); |
230 | return retval; | 236 | return retval; |
231 | } | 237 | } |
232 | 238 | ||
233 | /* Check for a power fault */ | 239 | /* Check for a power fault */ |
234 | if (p_slot->hpc_ops->query_power_fault(p_slot)) { | 240 | if (p_slot->hpc_ops->query_power_fault(p_slot)) { |
235 | dbg("%s: power fault detected\n", __func__); | 241 | ctrl_dbg(ctrl, "%s: power fault detected\n", __func__); |
236 | retval = POWER_FAILURE; | 242 | retval = POWER_FAILURE; |
237 | goto err_exit; | 243 | goto err_exit; |
238 | } | 244 | } |
239 | 245 | ||
240 | retval = pciehp_configure_device(p_slot); | 246 | retval = pciehp_configure_device(p_slot); |
241 | if (retval) { | 247 | if (retval) { |
242 | err("Cannot add device 0x%x:%x\n", p_slot->bus, | 248 | ctrl_err(ctrl, "Cannot add device 0x%x:%x\n", |
243 | p_slot->device); | 249 | p_slot->bus, p_slot->device); |
244 | goto err_exit; | 250 | goto err_exit; |
245 | } | 251 | } |
246 | 252 | ||
@@ -272,14 +278,14 @@ static int remove_board(struct slot *p_slot) | |||
272 | if (retval) | 278 | if (retval) |
273 | return retval; | 279 | return retval; |
274 | 280 | ||
275 | dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); | 281 | ctrl_dbg(ctrl, "In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); |
276 | 282 | ||
277 | if (POWER_CTRL(ctrl)) { | 283 | if (POWER_CTRL(ctrl)) { |
278 | /* power off slot */ | 284 | /* power off slot */ |
279 | retval = p_slot->hpc_ops->power_off_slot(p_slot); | 285 | retval = p_slot->hpc_ops->power_off_slot(p_slot); |
280 | if (retval) { | 286 | if (retval) { |
281 | err("%s: Issue of Slot Disable command failed\n", | 287 | ctrl_err(ctrl, "%s: Issue of Slot Disable command " |
282 | __func__); | 288 | "failed\n", __func__); |
283 | return retval; | 289 | return retval; |
284 | } | 290 | } |
285 | } | 291 | } |
@@ -320,8 +326,8 @@ static void pciehp_power_thread(struct work_struct *work) | |||
320 | switch (p_slot->state) { | 326 | switch (p_slot->state) { |
321 | case POWEROFF_STATE: | 327 | case POWEROFF_STATE: |
322 | mutex_unlock(&p_slot->lock); | 328 | mutex_unlock(&p_slot->lock); |
323 | dbg("%s: disabling bus:device(%x:%x)\n", | 329 | ctrl_dbg(p_slot->ctrl, "%s: disabling bus:device(%x:%x)\n", |
324 | __func__, p_slot->bus, p_slot->device); | 330 | __func__, p_slot->bus, p_slot->device); |
325 | pciehp_disable_slot(p_slot); | 331 | pciehp_disable_slot(p_slot); |
326 | mutex_lock(&p_slot->lock); | 332 | mutex_lock(&p_slot->lock); |
327 | p_slot->state = STATIC_STATE; | 333 | p_slot->state = STATIC_STATE; |
@@ -349,7 +355,8 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
349 | 355 | ||
350 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 356 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
351 | if (!info) { | 357 | if (!info) { |
352 | err("%s: Cannot allocate memory\n", __func__); | 358 | ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", |
359 | __func__); | ||
353 | return; | 360 | return; |
354 | } | 361 | } |
355 | info->p_slot = p_slot; | 362 | info->p_slot = p_slot; |
@@ -403,12 +410,14 @@ static void handle_button_press_event(struct slot *p_slot) | |||
403 | p_slot->hpc_ops->get_power_status(p_slot, &getstatus); | 410 | p_slot->hpc_ops->get_power_status(p_slot, &getstatus); |
404 | if (getstatus) { | 411 | if (getstatus) { |
405 | p_slot->state = BLINKINGOFF_STATE; | 412 | p_slot->state = BLINKINGOFF_STATE; |
406 | info("PCI slot #%s - powering off due to button " | 413 | ctrl_info(ctrl, |
407 | "press.\n", p_slot->name); | 414 | "PCI slot #%s - powering off due to button " |
415 | "press.\n", p_slot->name); | ||
408 | } else { | 416 | } else { |
409 | p_slot->state = BLINKINGON_STATE; | 417 | p_slot->state = BLINKINGON_STATE; |
410 | info("PCI slot #%s - powering on due to button " | 418 | ctrl_info(ctrl, |
411 | "press.\n", p_slot->name); | 419 | "PCI slot #%s - powering on due to button " |
420 | "press.\n", p_slot->name); | ||
412 | } | 421 | } |
413 | /* blink green LED and turn off amber */ | 422 | /* blink green LED and turn off amber */ |
414 | if (PWR_LED(ctrl)) | 423 | if (PWR_LED(ctrl)) |
@@ -425,8 +434,8 @@ static void handle_button_press_event(struct slot *p_slot) | |||
425 | * press the attention again before the 5 sec. limit | 434 | * press the attention again before the 5 sec. limit |
426 | * expires to cancel hot-add or hot-remove | 435 | * expires to cancel hot-add or hot-remove |
427 | */ | 436 | */ |
428 | info("Button cancel on Slot(%s)\n", p_slot->name); | 437 | ctrl_info(ctrl, "Button cancel on Slot(%s)\n", p_slot->name); |
429 | dbg("%s: button cancel\n", __func__); | 438 | ctrl_dbg(ctrl, "%s: button cancel\n", __func__); |
430 | cancel_delayed_work(&p_slot->work); | 439 | cancel_delayed_work(&p_slot->work); |
431 | if (p_slot->state == BLINKINGOFF_STATE) { | 440 | if (p_slot->state == BLINKINGOFF_STATE) { |
432 | if (PWR_LED(ctrl)) | 441 | if (PWR_LED(ctrl)) |
@@ -437,8 +446,8 @@ static void handle_button_press_event(struct slot *p_slot) | |||
437 | } | 446 | } |
438 | if (ATTN_LED(ctrl)) | 447 | if (ATTN_LED(ctrl)) |
439 | p_slot->hpc_ops->set_attention_status(p_slot, 0); | 448 | p_slot->hpc_ops->set_attention_status(p_slot, 0); |
440 | info("PCI slot #%s - action canceled due to button press\n", | 449 | ctrl_info(ctrl, "PCI slot #%s - action canceled " |
441 | p_slot->name); | 450 | "due to button press\n", p_slot->name); |
442 | p_slot->state = STATIC_STATE; | 451 | p_slot->state = STATIC_STATE; |
443 | break; | 452 | break; |
444 | case POWEROFF_STATE: | 453 | case POWEROFF_STATE: |
@@ -448,11 +457,11 @@ static void handle_button_press_event(struct slot *p_slot) | |||
448 | * this means that the previous attention button action | 457 | * this means that the previous attention button action |
449 | * to hot-add or hot-remove is undergoing | 458 | * to hot-add or hot-remove is undergoing |
450 | */ | 459 | */ |
451 | info("Button ignore on Slot(%s)\n", p_slot->name); | 460 | ctrl_info(ctrl, "Button ignore on Slot(%s)\n", p_slot->name); |
452 | update_slot_info(p_slot); | 461 | update_slot_info(p_slot); |
453 | break; | 462 | break; |
454 | default: | 463 | default: |
455 | warn("Not a valid state\n"); | 464 | ctrl_warn(ctrl, "Not a valid state\n"); |
456 | break; | 465 | break; |
457 | } | 466 | } |
458 | } | 467 | } |
@@ -467,7 +476,8 @@ static void handle_surprise_event(struct slot *p_slot) | |||
467 | 476 | ||
468 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 477 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
469 | if (!info) { | 478 | if (!info) { |
470 | err("%s: Cannot allocate memory\n", __func__); | 479 | ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", |
480 | __func__); | ||
471 | return; | 481 | return; |
472 | } | 482 | } |
473 | info->p_slot = p_slot; | 483 | info->p_slot = p_slot; |
@@ -505,7 +515,7 @@ static void interrupt_event_handler(struct work_struct *work) | |||
505 | case INT_PRESENCE_OFF: | 515 | case INT_PRESENCE_OFF: |
506 | if (!HP_SUPR_RM(ctrl)) | 516 | if (!HP_SUPR_RM(ctrl)) |
507 | break; | 517 | break; |
508 | dbg("Surprise Removal\n"); | 518 | ctrl_dbg(ctrl, "Surprise Removal\n"); |
509 | update_slot_info(p_slot); | 519 | update_slot_info(p_slot); |
510 | handle_surprise_event(p_slot); | 520 | handle_surprise_event(p_slot); |
511 | break; | 521 | break; |
@@ -522,22 +532,23 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
522 | { | 532 | { |
523 | u8 getstatus = 0; | 533 | u8 getstatus = 0; |
524 | int rc; | 534 | int rc; |
535 | struct controller *ctrl = p_slot->ctrl; | ||
525 | 536 | ||
526 | /* Check to see if (latch closed, card present, power off) */ | 537 | /* Check to see if (latch closed, card present, power off) */ |
527 | mutex_lock(&p_slot->ctrl->crit_sect); | 538 | mutex_lock(&p_slot->ctrl->crit_sect); |
528 | 539 | ||
529 | rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); | 540 | rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); |
530 | if (rc || !getstatus) { | 541 | if (rc || !getstatus) { |
531 | info("%s: no adapter on slot(%s)\n", __func__, | 542 | ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", |
532 | p_slot->name); | 543 | __func__, p_slot->name); |
533 | mutex_unlock(&p_slot->ctrl->crit_sect); | 544 | mutex_unlock(&p_slot->ctrl->crit_sect); |
534 | return -ENODEV; | 545 | return -ENODEV; |
535 | } | 546 | } |
536 | if (MRL_SENS(p_slot->ctrl)) { | 547 | if (MRL_SENS(p_slot->ctrl)) { |
537 | rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 548 | rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); |
538 | if (rc || getstatus) { | 549 | if (rc || getstatus) { |
539 | info("%s: latch open on slot(%s)\n", __func__, | 550 | ctrl_info(ctrl, "%s: latch open on slot(%s)\n", |
540 | p_slot->name); | 551 | __func__, p_slot->name); |
541 | mutex_unlock(&p_slot->ctrl->crit_sect); | 552 | mutex_unlock(&p_slot->ctrl->crit_sect); |
542 | return -ENODEV; | 553 | return -ENODEV; |
543 | } | 554 | } |
@@ -546,8 +557,8 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
546 | if (POWER_CTRL(p_slot->ctrl)) { | 557 | if (POWER_CTRL(p_slot->ctrl)) { |
547 | rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); | 558 | rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); |
548 | if (rc || getstatus) { | 559 | if (rc || getstatus) { |
549 | info("%s: already enabled on slot(%s)\n", __func__, | 560 | ctrl_info(ctrl, "%s: already enabled on slot(%s)\n", |
550 | p_slot->name); | 561 | __func__, p_slot->name); |
551 | mutex_unlock(&p_slot->ctrl->crit_sect); | 562 | mutex_unlock(&p_slot->ctrl->crit_sect); |
552 | return -EINVAL; | 563 | return -EINVAL; |
553 | } | 564 | } |
@@ -571,6 +582,7 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
571 | { | 582 | { |
572 | u8 getstatus = 0; | 583 | u8 getstatus = 0; |
573 | int ret = 0; | 584 | int ret = 0; |
585 | struct controller *ctrl = p_slot->ctrl; | ||
574 | 586 | ||
575 | if (!p_slot->ctrl) | 587 | if (!p_slot->ctrl) |
576 | return 1; | 588 | return 1; |
@@ -581,8 +593,8 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
581 | if (!HP_SUPR_RM(p_slot->ctrl)) { | 593 | if (!HP_SUPR_RM(p_slot->ctrl)) { |
582 | ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); | 594 | ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); |
583 | if (ret || !getstatus) { | 595 | if (ret || !getstatus) { |
584 | info("%s: no adapter on slot(%s)\n", __func__, | 596 | ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", |
585 | p_slot->name); | 597 | __func__, p_slot->name); |
586 | mutex_unlock(&p_slot->ctrl->crit_sect); | 598 | mutex_unlock(&p_slot->ctrl->crit_sect); |
587 | return -ENODEV; | 599 | return -ENODEV; |
588 | } | 600 | } |
@@ -591,8 +603,8 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
591 | if (MRL_SENS(p_slot->ctrl)) { | 603 | if (MRL_SENS(p_slot->ctrl)) { |
592 | ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 604 | ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); |
593 | if (ret || getstatus) { | 605 | if (ret || getstatus) { |
594 | info("%s: latch open on slot(%s)\n", __func__, | 606 | ctrl_info(ctrl, "%s: latch open on slot(%s)\n", |
595 | p_slot->name); | 607 | __func__, p_slot->name); |
596 | mutex_unlock(&p_slot->ctrl->crit_sect); | 608 | mutex_unlock(&p_slot->ctrl->crit_sect); |
597 | return -ENODEV; | 609 | return -ENODEV; |
598 | } | 610 | } |
@@ -601,8 +613,8 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
601 | if (POWER_CTRL(p_slot->ctrl)) { | 613 | if (POWER_CTRL(p_slot->ctrl)) { |
602 | ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); | 614 | ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); |
603 | if (ret || !getstatus) { | 615 | if (ret || !getstatus) { |
604 | info("%s: already disabled slot(%s)\n", __func__, | 616 | ctrl_info(ctrl, "%s: already disabled slot(%s)\n", |
605 | p_slot->name); | 617 | __func__, p_slot->name); |
606 | mutex_unlock(&p_slot->ctrl->crit_sect); | 618 | mutex_unlock(&p_slot->ctrl->crit_sect); |
607 | return -EINVAL; | 619 | return -EINVAL; |
608 | } | 620 | } |
@@ -618,6 +630,7 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
618 | int pciehp_sysfs_enable_slot(struct slot *p_slot) | 630 | int pciehp_sysfs_enable_slot(struct slot *p_slot) |
619 | { | 631 | { |
620 | int retval = -ENODEV; | 632 | int retval = -ENODEV; |
633 | struct controller *ctrl = p_slot->ctrl; | ||
621 | 634 | ||
622 | mutex_lock(&p_slot->lock); | 635 | mutex_lock(&p_slot->lock); |
623 | switch (p_slot->state) { | 636 | switch (p_slot->state) { |
@@ -631,15 +644,15 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) | |||
631 | p_slot->state = STATIC_STATE; | 644 | p_slot->state = STATIC_STATE; |
632 | break; | 645 | break; |
633 | case POWERON_STATE: | 646 | case POWERON_STATE: |
634 | info("Slot %s is already in powering on state\n", | 647 | ctrl_info(ctrl, "Slot %s is already in powering on state\n", |
635 | p_slot->name); | 648 | p_slot->name); |
636 | break; | 649 | break; |
637 | case BLINKINGOFF_STATE: | 650 | case BLINKINGOFF_STATE: |
638 | case POWEROFF_STATE: | 651 | case POWEROFF_STATE: |
639 | info("Already enabled on slot %s\n", p_slot->name); | 652 | ctrl_info(ctrl, "Already enabled on slot %s\n", p_slot->name); |
640 | break; | 653 | break; |
641 | default: | 654 | default: |
642 | err("Not a valid state on slot %s\n", p_slot->name); | 655 | ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); |
643 | break; | 656 | break; |
644 | } | 657 | } |
645 | mutex_unlock(&p_slot->lock); | 658 | mutex_unlock(&p_slot->lock); |
@@ -650,6 +663,7 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) | |||
650 | int pciehp_sysfs_disable_slot(struct slot *p_slot) | 663 | int pciehp_sysfs_disable_slot(struct slot *p_slot) |
651 | { | 664 | { |
652 | int retval = -ENODEV; | 665 | int retval = -ENODEV; |
666 | struct controller *ctrl = p_slot->ctrl; | ||
653 | 667 | ||
654 | mutex_lock(&p_slot->lock); | 668 | mutex_lock(&p_slot->lock); |
655 | switch (p_slot->state) { | 669 | switch (p_slot->state) { |
@@ -663,15 +677,15 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot) | |||
663 | p_slot->state = STATIC_STATE; | 677 | p_slot->state = STATIC_STATE; |
664 | break; | 678 | break; |
665 | case POWEROFF_STATE: | 679 | case POWEROFF_STATE: |
666 | info("Slot %s is already in powering off state\n", | 680 | ctrl_info(ctrl, "Slot %s is already in powering off state\n", |
667 | p_slot->name); | 681 | p_slot->name); |
668 | break; | 682 | break; |
669 | case BLINKINGON_STATE: | 683 | case BLINKINGON_STATE: |
670 | case POWERON_STATE: | 684 | case POWERON_STATE: |
671 | info("Already disabled on slot %s\n", p_slot->name); | 685 | ctrl_info(ctrl, "Already disabled on slot %s\n", p_slot->name); |
672 | break; | 686 | break; |
673 | default: | 687 | default: |
674 | err("Not a valid state on slot %s\n", p_slot->name); | 688 | ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); |
675 | break; | 689 | break; |
676 | } | 690 | } |
677 | mutex_unlock(&p_slot->lock); | 691 | mutex_unlock(&p_slot->lock); |
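The pciehp_ctrl.c hunks above swap the driver's bare dbg()/info()/warn()/err() macros for ctrl_dbg()/ctrl_info()/ctrl_warn()/ctrl_err(), which take the controller so every message carries the device name. The real definitions live in pciehp.h and may differ in detail; the lines below are only a sketch of how such wrappers are typically layered on the dev_* helpers, assuming the controller caches its pcie_device in ctrl->pcie.

/* Sketch only: the actual macros are defined in pciehp.h. */
#define ctrl_dbg(ctrl, format, arg...)					\
	do {								\
		if (pciehp_debug)					\
			dev_printk(KERN_DEBUG, &(ctrl)->pcie->device,	\
				   format, ## arg);			\
	} while (0)
#define ctrl_err(ctrl, format, arg...)					\
	dev_err(&(ctrl)->pcie->device, format, ## arg)
#define ctrl_info(ctrl, format, arg...)					\
	dev_info(&(ctrl)->pcie->device, format, ## arg)
#define ctrl_warn(ctrl, format, arg...)					\
	dev_warn(&(ctrl)->pcie->device, format, ## arg)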
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 9d934ddee956..8e9530c4c36d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -223,7 +223,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec) | |||
223 | 223 | ||
224 | static inline int pciehp_request_irq(struct controller *ctrl) | 224 | static inline int pciehp_request_irq(struct controller *ctrl) |
225 | { | 225 | { |
226 | int retval, irq = ctrl->pci_dev->irq; | 226 | int retval, irq = ctrl->pcie->irq; |
227 | 227 | ||
228 | /* Install interrupt polling timer. Start with 10 sec delay */ | 228 | /* Install interrupt polling timer. Start with 10 sec delay */ |
229 | if (pciehp_poll_mode) { | 229 | if (pciehp_poll_mode) { |
@@ -235,7 +235,8 @@ static inline int pciehp_request_irq(struct controller *ctrl) | |||
235 | /* Installs the interrupt handler */ | 235 | /* Installs the interrupt handler */ |
236 | retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); | 236 | retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); |
237 | if (retval) | 237 | if (retval) |
238 | err("Cannot get irq %d for the hotplug controller\n", irq); | 238 | ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", |
239 | irq); | ||
239 | return retval; | 240 | return retval; |
240 | } | 241 | } |
241 | 242 | ||
@@ -244,7 +245,7 @@ static inline void pciehp_free_irq(struct controller *ctrl) | |||
244 | if (pciehp_poll_mode) | 245 | if (pciehp_poll_mode) |
245 | del_timer_sync(&ctrl->poll_timer); | 246 | del_timer_sync(&ctrl->poll_timer); |
246 | else | 247 | else |
247 | free_irq(ctrl->pci_dev->irq, ctrl); | 248 | free_irq(ctrl->pcie->irq, ctrl); |
248 | } | 249 | } |
249 | 250 | ||
250 | static int pcie_poll_cmd(struct controller *ctrl) | 251 | static int pcie_poll_cmd(struct controller *ctrl) |
@@ -282,7 +283,7 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll) | |||
282 | else | 283 | else |
283 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); | 284 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); |
284 | if (!rc) | 285 | if (!rc) |
285 | dbg("Command not completed in 1000 msec\n"); | 286 | ctrl_dbg(ctrl, "Command not completed in 1000 msec\n"); |
286 | } | 287 | } |
287 | 288 | ||
288 | /** | 289 | /** |
@@ -301,7 +302,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
301 | 302 | ||
302 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 303 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); |
303 | if (retval) { | 304 | if (retval) { |
304 | err("%s: Cannot read SLOTSTATUS register\n", __func__); | 305 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
306 | __func__); | ||
305 | goto out; | 307 | goto out; |
306 | } | 308 | } |
307 | 309 | ||
@@ -312,26 +314,28 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
312 | * proceed forward to issue the next command according | 314 | * proceed forward to issue the next command according |
313 | * to spec. Just print out the error message. | 315 | * to spec. Just print out the error message. |
314 | */ | 316 | */ |
315 | dbg("%s: CMD_COMPLETED not clear after 1 sec.\n", | 317 | ctrl_dbg(ctrl, |
316 | __func__); | 318 | "%s: CMD_COMPLETED not clear after 1 sec.\n", |
319 | __func__); | ||
317 | } else if (!NO_CMD_CMPL(ctrl)) { | 320 | } else if (!NO_CMD_CMPL(ctrl)) { |
318 | /* | 321 | /* |
319 | * This controller seems to notify of command completed | 322 | * This controller seems to notify of command completed |
320 | * event even though it supports none of power | 323 | * event even though it supports none of power |
321 | * controller, attention led, power led and EMI. | 324 | * controller, attention led, power led and EMI. |
322 | */ | 325 | */ |
323 | dbg("%s: Unexpected CMD_COMPLETED. Need to wait for " | 326 | ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Need to " |
324 | "command completed event.\n", __func__); | 327 | "wait for command completed event.\n", |
328 | __func__); | ||
325 | ctrl->no_cmd_complete = 0; | 329 | ctrl->no_cmd_complete = 0; |
326 | } else { | 330 | } else { |
327 | dbg("%s: Unexpected CMD_COMPLETED. Maybe the " | 331 | ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Maybe " |
328 | "controller is broken.\n", __func__); | 332 | "the controller is broken.\n", __func__); |
329 | } | 333 | } |
330 | } | 334 | } |
331 | 335 | ||
332 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); | 336 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); |
333 | if (retval) { | 337 | if (retval) { |
334 | err("%s: Cannot read SLOTCTRL register\n", __func__); | 338 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
335 | goto out; | 339 | goto out; |
336 | } | 340 | } |
337 | 341 | ||
@@ -341,7 +345,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
341 | smp_mb(); | 345 | smp_mb(); |
342 | retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); | 346 | retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); |
343 | if (retval) | 347 | if (retval) |
344 | err("%s: Cannot write to SLOTCTRL register\n", __func__); | 348 | ctrl_err(ctrl, "%s: Cannot write to SLOTCTRL register\n", |
349 | __func__); | ||
345 | 350 | ||
346 | /* | 351 | /* |
347 | * Wait for command completion. | 352 | * Wait for command completion. |
@@ -370,14 +375,15 @@ static int hpc_check_lnk_status(struct controller *ctrl) | |||
370 | 375 | ||
371 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); | 376 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); |
372 | if (retval) { | 377 | if (retval) { |
373 | err("%s: Cannot read LNKSTATUS register\n", __func__); | 378 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", |
379 | __func__); | ||
374 | return retval; | 380 | return retval; |
375 | } | 381 | } |
376 | 382 | ||
377 | dbg("%s: lnk_status = %x\n", __func__, lnk_status); | 383 | ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); |
378 | if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || | 384 | if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || |
379 | !(lnk_status & NEG_LINK_WD)) { | 385 | !(lnk_status & NEG_LINK_WD)) { |
380 | err("%s : Link Training Error occurs \n", __func__); | 386 | ctrl_err(ctrl, "%s : Link Training Error occurs \n", __func__); |
381 | retval = -1; | 387 | retval = -1; |
382 | return retval; | 388 | return retval; |
383 | } | 389 | } |
@@ -394,12 +400,12 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) | |||
394 | 400 | ||
395 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); | 401 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); |
396 | if (retval) { | 402 | if (retval) { |
397 | err("%s: Cannot read SLOTCTRL register\n", __func__); | 403 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
398 | return retval; | 404 | return retval; |
399 | } | 405 | } |
400 | 406 | ||
401 | dbg("%s: SLOTCTRL %x, value read %x\n", | 407 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", |
402 | __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); | 408 | __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); |
403 | 409 | ||
404 | atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; | 410 | atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; |
405 | 411 | ||
@@ -433,11 +439,11 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) | |||
433 | 439 | ||
434 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); | 440 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); |
435 | if (retval) { | 441 | if (retval) { |
436 | err("%s: Cannot read SLOTCTRL register\n", __func__); | 442 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
437 | return retval; | 443 | return retval; |
438 | } | 444 | } |
439 | dbg("%s: SLOTCTRL %x value read %x\n", | 445 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", |
440 | __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); | 446 | __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); |
441 | 447 | ||
442 | pwr_state = (slot_ctrl & PWR_CTRL) >> 10; | 448 | pwr_state = (slot_ctrl & PWR_CTRL) >> 10; |
443 | 449 | ||
@@ -464,7 +470,8 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) | |||
464 | 470 | ||
465 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 471 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); |
466 | if (retval) { | 472 | if (retval) { |
467 | err("%s: Cannot read SLOTSTATUS register\n", __func__); | 473 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
474 | __func__); | ||
468 | return retval; | 475 | return retval; |
469 | } | 476 | } |
470 | 477 | ||
@@ -482,7 +489,8 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) | |||
482 | 489 | ||
483 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 490 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); |
484 | if (retval) { | 491 | if (retval) { |
485 | err("%s: Cannot read SLOTSTATUS register\n", __func__); | 492 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
493 | __func__); | ||
486 | return retval; | 494 | return retval; |
487 | } | 495 | } |
488 | card_state = (u8)((slot_status & PRSN_STATE) >> 6); | 496 | card_state = (u8)((slot_status & PRSN_STATE) >> 6); |
@@ -500,7 +508,7 @@ static int hpc_query_power_fault(struct slot *slot) | |||
500 | 508 | ||
501 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 509 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); |
502 | if (retval) { | 510 | if (retval) { |
503 | err("%s: Cannot check for power fault\n", __func__); | 511 | ctrl_err(ctrl, "%s: Cannot check for power fault\n", __func__); |
504 | return retval; | 512 | return retval; |
505 | } | 513 | } |
506 | pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); | 514 | pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); |
@@ -516,7 +524,7 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status) | |||
516 | 524 | ||
517 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 525 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); |
518 | if (retval) { | 526 | if (retval) { |
519 | err("%s : Cannot check EMI status\n", __func__); | 527 | ctrl_err(ctrl, "%s : Cannot check EMI status\n", __func__); |
520 | return retval; | 528 | return retval; |
521 | } | 529 | } |
522 | *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; | 530 | *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; |
@@ -560,8 +568,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) | |||
560 | return -1; | 568 | return -1; |
561 | } | 569 | } |
562 | rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 570 | rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
563 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 571 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
564 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 572 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
565 | 573 | ||
566 | return rc; | 574 | return rc; |
567 | } | 575 | } |
@@ -575,8 +583,8 @@ static void hpc_set_green_led_on(struct slot *slot) | |||
575 | slot_cmd = 0x0100; | 583 | slot_cmd = 0x0100; |
576 | cmd_mask = PWR_LED_CTRL; | 584 | cmd_mask = PWR_LED_CTRL; |
577 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 585 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
578 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 586 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
579 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 587 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
580 | } | 588 | } |
581 | 589 | ||
582 | static void hpc_set_green_led_off(struct slot *slot) | 590 | static void hpc_set_green_led_off(struct slot *slot) |
@@ -588,8 +596,8 @@ static void hpc_set_green_led_off(struct slot *slot) | |||
588 | slot_cmd = 0x0300; | 596 | slot_cmd = 0x0300; |
589 | cmd_mask = PWR_LED_CTRL; | 597 | cmd_mask = PWR_LED_CTRL; |
590 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 598 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
591 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 599 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
592 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 600 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
593 | } | 601 | } |
594 | 602 | ||
595 | static void hpc_set_green_led_blink(struct slot *slot) | 603 | static void hpc_set_green_led_blink(struct slot *slot) |
@@ -601,8 +609,8 @@ static void hpc_set_green_led_blink(struct slot *slot) | |||
601 | slot_cmd = 0x0200; | 609 | slot_cmd = 0x0200; |
602 | cmd_mask = PWR_LED_CTRL; | 610 | cmd_mask = PWR_LED_CTRL; |
603 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 611 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
604 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 612 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
605 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 613 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
606 | } | 614 | } |
607 | 615 | ||
608 | static int hpc_power_on_slot(struct slot * slot) | 616 | static int hpc_power_on_slot(struct slot * slot) |
@@ -613,20 +621,22 @@ static int hpc_power_on_slot(struct slot * slot) | |||
613 | u16 slot_status; | 621 | u16 slot_status; |
614 | int retval = 0; | 622 | int retval = 0; |
615 | 623 | ||
616 | dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); | 624 | ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); |
617 | 625 | ||
618 | /* Clear sticky power-fault bit from previous power failures */ | 626 | /* Clear sticky power-fault bit from previous power failures */ |
619 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 627 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); |
620 | if (retval) { | 628 | if (retval) { |
621 | err("%s: Cannot read SLOTSTATUS register\n", __func__); | 629 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
630 | __func__); | ||
622 | return retval; | 631 | return retval; |
623 | } | 632 | } |
624 | slot_status &= PWR_FAULT_DETECTED; | 633 | slot_status &= PWR_FAULT_DETECTED; |
625 | if (slot_status) { | 634 | if (slot_status) { |
626 | retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); | 635 | retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); |
627 | if (retval) { | 636 | if (retval) { |
628 | err("%s: Cannot write to SLOTSTATUS register\n", | 637 | ctrl_err(ctrl, |
629 | __func__); | 638 | "%s: Cannot write to SLOTSTATUS register\n", |
639 | __func__); | ||
630 | return retval; | 640 | return retval; |
631 | } | 641 | } |
632 | } | 642 | } |
@@ -644,11 +654,12 @@ static int hpc_power_on_slot(struct slot * slot) | |||
644 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 654 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
645 | 655 | ||
646 | if (retval) { | 656 | if (retval) { |
647 | err("%s: Write %x command failed!\n", __func__, slot_cmd); | 657 | ctrl_err(ctrl, "%s: Write %x command failed!\n", |
658 | __func__, slot_cmd); | ||
648 | return -1; | 659 | return -1; |
649 | } | 660 | } |
650 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 661 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
651 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 662 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
652 | 663 | ||
653 | return retval; | 664 | return retval; |
654 | } | 665 | } |
@@ -694,7 +705,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
694 | int retval = 0; | 705 | int retval = 0; |
695 | int changed; | 706 | int changed; |
696 | 707 | ||
697 | dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); | 708 | ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); |
698 | 709 | ||
699 | /* | 710 | /* |
700 | * Set Bad DLLP Mask bit in Correctable Error Mask | 711 | * Set Bad DLLP Mask bit in Correctable Error Mask |
@@ -722,12 +733,12 @@ static int hpc_power_off_slot(struct slot * slot) | |||
722 | 733 | ||
723 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 734 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
724 | if (retval) { | 735 | if (retval) { |
725 | err("%s: Write command failed!\n", __func__); | 736 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); |
726 | retval = -1; | 737 | retval = -1; |
727 | goto out; | 738 | goto out; |
728 | } | 739 | } |
729 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 740 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
730 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 741 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
731 | out: | 742 | out: |
732 | if (changed) | 743 | if (changed) |
733 | pcie_unmask_bad_dllp(ctrl); | 744 | pcie_unmask_bad_dllp(ctrl); |
@@ -749,7 +760,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
749 | intr_loc = 0; | 760 | intr_loc = 0; |
750 | do { | 761 | do { |
751 | if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { | 762 | if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { |
752 | err("%s: Cannot read SLOTSTATUS\n", __func__); | 763 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", |
764 | __func__); | ||
753 | return IRQ_NONE; | 765 | return IRQ_NONE; |
754 | } | 766 | } |
755 | 767 | ||
@@ -760,12 +772,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
760 | if (!intr_loc) | 772 | if (!intr_loc) |
761 | return IRQ_NONE; | 773 | return IRQ_NONE; |
762 | if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { | 774 | if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { |
763 | err("%s: Cannot write to SLOTSTATUS\n", __func__); | 775 | ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", |
776 | __func__); | ||
764 | return IRQ_NONE; | 777 | return IRQ_NONE; |
765 | } | 778 | } |
766 | } while (detected); | 779 | } while (detected); |
767 | 780 | ||
768 | dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); | 781 | ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); |
769 | 782 | ||
770 | /* Check Command Complete Interrupt Pending */ | 783 | /* Check Command Complete Interrupt Pending */ |
771 | if (intr_loc & CMD_COMPLETED) { | 784 | if (intr_loc & CMD_COMPLETED) { |
@@ -807,7 +820,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
807 | 820 | ||
808 | retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); | 821 | retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); |
809 | if (retval) { | 822 | if (retval) { |
810 | err("%s: Cannot read LNKCAP register\n", __func__); | 823 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); |
811 | return retval; | 824 | return retval; |
812 | } | 825 | } |
813 | 826 | ||
@@ -821,7 +834,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
821 | } | 834 | } |
822 | 835 | ||
823 | *value = lnk_speed; | 836 | *value = lnk_speed; |
824 | dbg("Max link speed = %d\n", lnk_speed); | 837 | ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); |
825 | 838 | ||
826 | return retval; | 839 | return retval; |
827 | } | 840 | } |
@@ -836,7 +849,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, | |||
836 | 849 | ||
837 | retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); | 850 | retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); |
838 | if (retval) { | 851 | if (retval) { |
839 | err("%s: Cannot read LNKCAP register\n", __func__); | 852 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); |
840 | return retval; | 853 | return retval; |
841 | } | 854 | } |
842 | 855 | ||
@@ -871,7 +884,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, | |||
871 | } | 884 | } |
872 | 885 | ||
873 | *value = lnk_wdth; | 886 | *value = lnk_wdth; |
874 | dbg("Max link width = %d\n", lnk_wdth); | 887 | ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth); |
875 | 888 | ||
876 | return retval; | 889 | return retval; |
877 | } | 890 | } |
@@ -885,7 +898,8 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
885 | 898 | ||
886 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); | 899 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); |
887 | if (retval) { | 900 | if (retval) { |
888 | err("%s: Cannot read LNKSTATUS register\n", __func__); | 901 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", |
902 | __func__); | ||
889 | return retval; | 903 | return retval; |
890 | } | 904 | } |
891 | 905 | ||
@@ -899,7 +913,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
899 | } | 913 | } |
900 | 914 | ||
901 | *value = lnk_speed; | 915 | *value = lnk_speed; |
902 | dbg("Current link speed = %d\n", lnk_speed); | 916 | ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); |
903 | 917 | ||
904 | return retval; | 918 | return retval; |
905 | } | 919 | } |
@@ -914,7 +928,8 @@ static int hpc_get_cur_lnk_width(struct slot *slot, | |||
914 | 928 | ||
915 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); | 929 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); |
916 | if (retval) { | 930 | if (retval) { |
917 | err("%s: Cannot read LNKSTATUS register\n", __func__); | 931 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", |
932 | __func__); | ||
918 | return retval; | 933 | return retval; |
919 | } | 934 | } |
920 | 935 | ||
@@ -949,7 +964,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot, | |||
949 | } | 964 | } |
950 | 965 | ||
951 | *value = lnk_wdth; | 966 | *value = lnk_wdth; |
952 | dbg("Current link width = %d\n", lnk_wdth); | 967 | ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth); |
953 | 968 | ||
954 | return retval; | 969 | return retval; |
955 | } | 970 | } |
@@ -998,7 +1013,8 @@ int pcie_enable_notification(struct controller *ctrl) | |||
998 | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; | 1013 | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; |
999 | 1014 | ||
1000 | if (pcie_write_cmd(ctrl, cmd, mask)) { | 1015 | if (pcie_write_cmd(ctrl, cmd, mask)) { |
1001 | err("%s: Cannot enable software notification\n", __func__); | 1016 | ctrl_err(ctrl, "%s: Cannot enable software notification\n", |
1017 | __func__); | ||
1002 | return -1; | 1018 | return -1; |
1003 | } | 1019 | } |
1004 | return 0; | 1020 | return 0; |
@@ -1010,7 +1026,8 @@ static void pcie_disable_notification(struct controller *ctrl) | |||
1010 | mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | | 1026 | mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | |
1011 | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; | 1027 | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; |
1012 | if (pcie_write_cmd(ctrl, 0, mask)) | 1028 | if (pcie_write_cmd(ctrl, 0, mask)) |
1013 | warn("%s: Cannot disable software notification\n", __func__); | 1029 | ctrl_warn(ctrl, "%s: Cannot disable software notification\n", |
1030 | __func__); | ||
1014 | } | 1031 | } |
1015 | 1032 | ||
1016 | static int pcie_init_notification(struct controller *ctrl) | 1033 | static int pcie_init_notification(struct controller *ctrl) |
@@ -1071,34 +1088,45 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
1071 | if (!pciehp_debug) | 1088 | if (!pciehp_debug) |
1072 | return; | 1089 | return; |
1073 | 1090 | ||
1074 | dbg("Hotplug Controller:\n"); | 1091 | ctrl_info(ctrl, "Hotplug Controller:\n"); |
1075 | dbg(" Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq); | 1092 | ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", |
1076 | dbg(" Vendor ID : 0x%04x\n", pdev->vendor); | 1093 | pci_name(pdev), pdev->irq); |
1077 | dbg(" Device ID : 0x%04x\n", pdev->device); | 1094 | ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor); |
1078 | dbg(" Subsystem ID : 0x%04x\n", pdev->subsystem_device); | 1095 | ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device); |
1079 | dbg(" Subsystem Vendor ID : 0x%04x\n", pdev->subsystem_vendor); | 1096 | ctrl_info(ctrl, " Subsystem ID : 0x%04x\n", |
1080 | dbg(" PCIe Cap offset : 0x%02x\n", ctrl->cap_base); | 1097 | pdev->subsystem_device); |
1098 | ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", | ||
1099 | pdev->subsystem_vendor); | ||
1100 | ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base); | ||
1081 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 1101 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
1082 | if (!pci_resource_len(pdev, i)) | 1102 | if (!pci_resource_len(pdev, i)) |
1083 | continue; | 1103 | continue; |
1084 | dbg(" PCI resource [%d] : 0x%llx@0x%llx\n", i, | 1104 | ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n", |
1085 | (unsigned long long)pci_resource_len(pdev, i), | 1105 | i, (unsigned long long)pci_resource_len(pdev, i), |
1086 | (unsigned long long)pci_resource_start(pdev, i)); | 1106 | (unsigned long long)pci_resource_start(pdev, i)); |
1087 | } | 1107 | } |
1088 | dbg("Slot Capabilities : 0x%08x\n", ctrl->slot_cap); | 1108 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); |
1089 | dbg(" Physical Slot Number : %d\n", ctrl->first_slot); | 1109 | ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot); |
1090 | dbg(" Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no"); | 1110 | ctrl_info(ctrl, " Attention Button : %3s\n", |
1091 | dbg(" Power Controller : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no"); | 1111 | ATTN_BUTTN(ctrl) ? "yes" : "no"); |
1092 | dbg(" MRL Sensor : %3s\n", MRL_SENS(ctrl) ? "yes" : "no"); | 1112 | ctrl_info(ctrl, " Power Controller : %3s\n", |
1093 | dbg(" Attention Indicator : %3s\n", ATTN_LED(ctrl) ? "yes" : "no"); | 1113 | POWER_CTRL(ctrl) ? "yes" : "no"); |
1094 | dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); | 1114 | ctrl_info(ctrl, " MRL Sensor : %3s\n", |
1095 | dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); | 1115 | MRL_SENS(ctrl) ? "yes" : "no"); |
1096 | dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); | 1116 | ctrl_info(ctrl, " Attention Indicator : %3s\n", |
1097 | dbg(" Command Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); | 1117 | ATTN_LED(ctrl) ? "yes" : "no"); |
1118 | ctrl_info(ctrl, " Power Indicator : %3s\n", | ||
1119 | PWR_LED(ctrl) ? "yes" : "no"); | ||
1120 | ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n", | ||
1121 | HP_SUPR_RM(ctrl) ? "yes" : "no"); | ||
1122 | ctrl_info(ctrl, " EMI Present : %3s\n", | ||
1123 | EMI(ctrl) ? "yes" : "no"); | ||
1124 | ctrl_info(ctrl, " Command Completed : %3s\n", | ||
1125 | NO_CMD_CMPL(ctrl) ? "no" : "yes"); | ||
1098 | pciehp_readw(ctrl, SLOTSTATUS, ®16); | 1126 | pciehp_readw(ctrl, SLOTSTATUS, ®16); |
1099 | dbg("Slot Status : 0x%04x\n", reg16); | 1127 | ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); |
1100 | pciehp_readw(ctrl, SLOTCTRL, ®16); | 1128 | pciehp_readw(ctrl, SLOTCTRL, ®16); |
1101 | dbg("Slot Control : 0x%04x\n", reg16); | 1129 | ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); |
1102 | } | 1130 | } |
1103 | 1131 | ||
1104 | struct controller *pcie_init(struct pcie_device *dev) | 1132 | struct controller *pcie_init(struct pcie_device *dev) |
@@ -1109,19 +1137,21 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
1109 | 1137 | ||
1110 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); | 1138 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); |
1111 | if (!ctrl) { | 1139 | if (!ctrl) { |
1112 | err("%s : out of memory\n", __func__); | 1140 | dev_err(&dev->device, "%s : out of memory\n", __func__); |
1113 | goto abort; | 1141 | goto abort; |
1114 | } | 1142 | } |
1115 | INIT_LIST_HEAD(&ctrl->slot_list); | 1143 | INIT_LIST_HEAD(&ctrl->slot_list); |
1116 | 1144 | ||
1145 | ctrl->pcie = dev; | ||
1117 | ctrl->pci_dev = pdev; | 1146 | ctrl->pci_dev = pdev; |
1118 | ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 1147 | ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
1119 | if (!ctrl->cap_base) { | 1148 | if (!ctrl->cap_base) { |
1120 | err("%s: Cannot find PCI Express capability\n", __func__); | 1149 | ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n", |
1150 | __func__); | ||
1121 | goto abort; | 1151 | goto abort; |
1122 | } | 1152 | } |
1123 | if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { | 1153 | if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { |
1124 | err("%s: Cannot read SLOTCAP register\n", __func__); | 1154 | ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__); |
1125 | goto abort; | 1155 | goto abort; |
1126 | } | 1156 | } |
1127 | 1157 | ||
@@ -1161,9 +1191,9 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
1161 | goto abort_ctrl; | 1191 | goto abort_ctrl; |
1162 | } | 1192 | } |
1163 | 1193 | ||
1164 | info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", | 1194 | ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", |
1165 | pdev->vendor, pdev->device, | 1195 | pdev->vendor, pdev->device, pdev->subsystem_vendor, |
1166 | pdev->subsystem_vendor, pdev->subsystem_device); | 1196 | pdev->subsystem_device); |
1167 | 1197 | ||
1168 | if (pcie_init_slot(ctrl)) | 1198 | if (pcie_init_slot(ctrl)) |
1169 | goto abort_ctrl; | 1199 | goto abort_ctrl; |
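pciehp_hpc.c adds ctrl->pcie = dev in pcie_init() and switches the IRQ plumbing from ctrl->pci_dev->irq to ctrl->pcie->irq, so both logging and the interrupt line come from the port service device. A condensed, illustrative sketch of the resulting request/free pairing, reusing the names from the hunks above rather than showing new functionality:

/* Sketch: the irq requested and the irq freed must both come from ctrl->pcie. */
static int example_request_irq(struct controller *ctrl)
{
	int irq = ctrl->pcie->irq;
	int retval;

	retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
	if (retval)
		ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
			 irq);
	return retval;
}

static void example_free_irq(struct controller *ctrl)
{
	free_irq(ctrl->pcie->irq, ctrl);
}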
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 6040dcceb256..ffd11148fbe2 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -198,18 +198,20 @@ int pciehp_configure_device(struct slot *p_slot) | |||
198 | struct pci_dev *dev; | 198 | struct pci_dev *dev; |
199 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 199 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; |
200 | int num, fn; | 200 | int num, fn; |
201 | struct controller *ctrl = p_slot->ctrl; | ||
201 | 202 | ||
202 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); | 203 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); |
203 | if (dev) { | 204 | if (dev) { |
204 | err("Device %s already exists at %x:%x, cannot hot-add\n", | 205 | ctrl_err(ctrl, |
205 | pci_name(dev), p_slot->bus, p_slot->device); | 206 | "Device %s already exists at %x:%x, cannot hot-add\n", |
207 | pci_name(dev), p_slot->bus, p_slot->device); | ||
206 | pci_dev_put(dev); | 208 | pci_dev_put(dev); |
207 | return -EINVAL; | 209 | return -EINVAL; |
208 | } | 210 | } |
209 | 211 | ||
210 | num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); | 212 | num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); |
211 | if (num == 0) { | 213 | if (num == 0) { |
212 | err("No new device found\n"); | 214 | ctrl_err(ctrl, "No new device found\n"); |
213 | return -ENODEV; | 215 | return -ENODEV; |
214 | } | 216 | } |
215 | 217 | ||
@@ -218,8 +220,8 @@ int pciehp_configure_device(struct slot *p_slot) | |||
218 | if (!dev) | 220 | if (!dev) |
219 | continue; | 221 | continue; |
220 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | 222 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { |
221 | err("Cannot hot-add display device %s\n", | 223 | ctrl_err(ctrl, "Cannot hot-add display device %s\n", |
222 | pci_name(dev)); | 224 | pci_name(dev)); |
223 | pci_dev_put(dev); | 225 | pci_dev_put(dev); |
224 | continue; | 226 | continue; |
225 | } | 227 | } |
@@ -244,9 +246,10 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
244 | u8 presence = 0; | 246 | u8 presence = 0; |
245 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 247 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; |
246 | u16 command; | 248 | u16 command; |
249 | struct controller *ctrl = p_slot->ctrl; | ||
247 | 250 | ||
248 | dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, | 251 | ctrl_dbg(ctrl, "%s: bus/dev = %x/%x\n", __func__, |
249 | p_slot->device); | 252 | p_slot->bus, p_slot->device); |
250 | ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); | 253 | ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); |
251 | if (ret) | 254 | if (ret) |
252 | presence = 0; | 255 | presence = 0; |
@@ -257,16 +260,17 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
257 | if (!temp) | 260 | if (!temp) |
258 | continue; | 261 | continue; |
259 | if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | 262 | if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { |
260 | err("Cannot remove display device %s\n", | 263 | ctrl_err(ctrl, "Cannot remove display device %s\n", |
261 | pci_name(temp)); | 264 | pci_name(temp)); |
262 | pci_dev_put(temp); | 265 | pci_dev_put(temp); |
263 | continue; | 266 | continue; |
264 | } | 267 | } |
265 | if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { | 268 | if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { |
266 | pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); | 269 | pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); |
267 | if (bctl & PCI_BRIDGE_CTL_VGA) { | 270 | if (bctl & PCI_BRIDGE_CTL_VGA) { |
268 | err("Cannot remove display device %s\n", | 271 | ctrl_err(ctrl, |
269 | pci_name(temp)); | 272 | "Cannot remove display device %s\n", |
273 | pci_name(temp)); | ||
270 | pci_dev_put(temp); | 274 | pci_dev_put(temp); |
271 | continue; | 275 | continue; |
272 | } | 276 | } |
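pciehp_pci.c follows the same pattern as the earlier files: each function that logs caches struct controller *ctrl = p_slot->ctrl once at the top, and every ctrl_* call then stays within a reasonable line length. A hypothetical function showing just the shape of that pattern, not code from this patch:

static int example_check_adapter(struct slot *p_slot)
{
	struct controller *ctrl = p_slot->ctrl;	/* cached once, used for logging */
	u8 present = 0;

	if (p_slot->hpc_ops->get_adapter_status(p_slot, &present) || !present) {
		ctrl_err(ctrl, "%s: no adapter on slot(%s)\n",
			 __func__, p_slot->name);
		return -ENODEV;
	}
	ctrl_dbg(ctrl, "%s: adapter present in slot(%s)\n",
		 __func__, p_slot->name);
	return 0;
}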
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h index 7d5921b1ee78..419919a87b0f 100644 --- a/drivers/pci/hotplug/rpaphp.h +++ b/drivers/pci/hotplug/rpaphp.h | |||
@@ -46,10 +46,10 @@ | |||
46 | #define PRESENT 1 /* Card in slot */ | 46 | #define PRESENT 1 /* Card in slot */ |
47 | 47 | ||
48 | #define MY_NAME "rpaphp" | 48 | #define MY_NAME "rpaphp" |
49 | extern int debug; | 49 | extern int rpaphp_debug; |
50 | #define dbg(format, arg...) \ | 50 | #define dbg(format, arg...) \ |
51 | do { \ | 51 | do { \ |
52 | if (debug) \ | 52 | if (rpaphp_debug) \ |
53 | printk(KERN_DEBUG "%s: " format, \ | 53 | printk(KERN_DEBUG "%s: " format, \ |
54 | MY_NAME , ## arg); \ | 54 | MY_NAME , ## arg); \ |
55 | } while (0) | 55 | } while (0) |
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 1f84f402acdb..95d02a08fdc7 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c | |||
@@ -37,7 +37,7 @@ | |||
37 | /* and pci_do_scan_bus */ | 37 | /* and pci_do_scan_bus */ |
38 | #include "rpaphp.h" | 38 | #include "rpaphp.h" |
39 | 39 | ||
40 | int debug; | 40 | int rpaphp_debug; |
41 | LIST_HEAD(rpaphp_slot_head); | 41 | LIST_HEAD(rpaphp_slot_head); |
42 | 42 | ||
43 | #define DRIVER_VERSION "0.1" | 43 | #define DRIVER_VERSION "0.1" |
@@ -50,7 +50,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
50 | MODULE_DESCRIPTION(DRIVER_DESC); | 50 | MODULE_DESCRIPTION(DRIVER_DESC); |
51 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
52 | 52 | ||
53 | module_param(debug, bool, 0644); | 53 | module_param_named(debug, rpaphp_debug, bool, 0644); |
54 | 54 | ||
55 | /** | 55 | /** |
56 | * set_attention_status - set attention LED | 56 | * set_attention_status - set attention LED |
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index 5acfd4f3d4cb..513e1e282391 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c | |||
@@ -123,7 +123,7 @@ int rpaphp_enable_slot(struct slot *slot) | |||
123 | slot->state = CONFIGURED; | 123 | slot->state = CONFIGURED; |
124 | } | 124 | } |
125 | 125 | ||
126 | if (debug) { | 126 | if (rpaphp_debug) { |
127 | struct pci_dev *dev; | 127 | struct pci_dev *dev; |
128 | dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); | 128 | dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); |
129 | list_for_each_entry (dev, &bus->devices, bus_list) | 129 | list_for_each_entry (dev, &bus->devices, bus_list) |
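The rpaphp change renames the global flag from debug to rpaphp_debug so the symbol no longer clashes with anything else named debug, while module_param_named() keeps the user-visible parameter name unchanged. A minimal sketch of the idiom for a hypothetical module called example:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_debug;	/* internal symbol name is unique */
/* Still exposed as "debug": modprobe example debug=1, or write to
 * /sys/module/example/parameters/debug at run time (perm 0644 above). */
module_param_named(debug, example_debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging output");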
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 279c940a0039..bf7d6ce9bbb3 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
@@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) | |||
126 | cfg->msg.address_hi = 0xffffffff; | 126 | cfg->msg.address_hi = 0xffffffff; |
127 | 127 | ||
128 | irq = create_irq(); | 128 | irq = create_irq(); |
129 | if (irq < 0) { | 129 | |
130 | if (irq <= 0) { | ||
130 | kfree(cfg); | 131 | kfree(cfg); |
131 | return -EBUSY; | 132 | return -EBUSY; |
132 | } | 133 | } |
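The htirq fix widens the failure check on create_irq(): IRQ 0 is not a usable dynamically created vector, so only a strictly positive return is accepted and the per-interrupt cfg is freed on any other result. In isolation the guard is simply:

	irq = create_irq();
	if (irq <= 0) {		/* 0 or negative: no usable vector was allocated */
		kfree(cfg);	/* avoid leaking the per-irq cfg on failure */
		return -EBUSY;
	}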
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 738d4c89581c..2de5a3238c94 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/interrupt.h> | ||
1 | #include <linux/dmar.h> | 2 | #include <linux/dmar.h> |
2 | #include <linux/spinlock.h> | 3 | #include <linux/spinlock.h> |
3 | #include <linux/jiffies.h> | 4 | #include <linux/jiffies.h> |
@@ -11,41 +12,64 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | |||
11 | static int ir_ioapic_num; | 12 | static int ir_ioapic_num; |
12 | int intr_remapping_enabled; | 13 | int intr_remapping_enabled; |
13 | 14 | ||
14 | static struct { | 15 | struct irq_2_iommu { |
15 | struct intel_iommu *iommu; | 16 | struct intel_iommu *iommu; |
16 | u16 irte_index; | 17 | u16 irte_index; |
17 | u16 sub_handle; | 18 | u16 sub_handle; |
18 | u8 irte_mask; | 19 | u8 irte_mask; |
19 | } irq_2_iommu[NR_IRQS]; | 20 | }; |
21 | |||
22 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | ||
23 | |||
24 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
25 | { | ||
26 | return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; | ||
27 | } | ||
28 | |||
29 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
30 | { | ||
31 | return irq_2_iommu(irq); | ||
32 | } | ||
20 | 33 | ||
21 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 34 | static DEFINE_SPINLOCK(irq_2_ir_lock); |
22 | 35 | ||
23 | int irq_remapped(int irq) | 36 | static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) |
24 | { | 37 | { |
25 | if (irq > NR_IRQS) | 38 | struct irq_2_iommu *irq_iommu; |
26 | return 0; | 39 | |
40 | irq_iommu = irq_2_iommu(irq); | ||
41 | |||
42 | if (!irq_iommu) | ||
43 | return NULL; | ||
44 | |||
45 | if (!irq_iommu->iommu) | ||
46 | return NULL; | ||
27 | 47 | ||
28 | if (!irq_2_iommu[irq].iommu) | 48 | return irq_iommu; |
29 | return 0; | 49 | } |
30 | 50 | ||
31 | return 1; | 51 | int irq_remapped(int irq) |
52 | { | ||
53 | return valid_irq_2_iommu(irq) != NULL; | ||
32 | } | 54 | } |
33 | 55 | ||
34 | int get_irte(int irq, struct irte *entry) | 56 | int get_irte(int irq, struct irte *entry) |
35 | { | 57 | { |
36 | int index; | 58 | int index; |
59 | struct irq_2_iommu *irq_iommu; | ||
37 | 60 | ||
38 | if (!entry || irq > NR_IRQS) | 61 | if (!entry) |
39 | return -1; | 62 | return -1; |
40 | 63 | ||
41 | spin_lock(&irq_2_ir_lock); | 64 | spin_lock(&irq_2_ir_lock); |
42 | if (!irq_2_iommu[irq].iommu) { | 65 | irq_iommu = valid_irq_2_iommu(irq); |
66 | if (!irq_iommu) { | ||
43 | spin_unlock(&irq_2_ir_lock); | 67 | spin_unlock(&irq_2_ir_lock); |
44 | return -1; | 68 | return -1; |
45 | } | 69 | } |
46 | 70 | ||
47 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | 71 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
48 | *entry = *(irq_2_iommu[irq].iommu->ir_table->base + index); | 72 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
49 | 73 | ||
50 | spin_unlock(&irq_2_ir_lock); | 74 | spin_unlock(&irq_2_ir_lock); |
51 | return 0; | 75 | return 0; |
@@ -54,6 +78,7 @@ int get_irte(int irq, struct irte *entry) | |||
54 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | 78 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) |
55 | { | 79 | { |
56 | struct ir_table *table = iommu->ir_table; | 80 | struct ir_table *table = iommu->ir_table; |
81 | struct irq_2_iommu *irq_iommu; | ||
57 | u16 index, start_index; | 82 | u16 index, start_index; |
58 | unsigned int mask = 0; | 83 | unsigned int mask = 0; |
59 | int i; | 84 | int i; |
@@ -61,6 +86,10 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
61 | if (!count) | 86 | if (!count) |
62 | return -1; | 87 | return -1; |
63 | 88 | ||
89 | /* protect irq_2_iommu_alloc later */ | ||
90 | if (irq >= nr_irqs) | ||
91 | return -1; | ||
92 | |||
64 | /* | 93 | /* |
65 | * start the IRTE search from index 0. | 94 | * start the IRTE search from index 0. |
66 | */ | 95 | */ |
@@ -100,10 +129,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
100 | for (i = index; i < index + count; i++) | 129 | for (i = index; i < index + count; i++) |
101 | table->base[i].present = 1; | 130 | table->base[i].present = 1; |
102 | 131 | ||
103 | irq_2_iommu[irq].iommu = iommu; | 132 | irq_iommu = irq_2_iommu_alloc(irq); |
104 | irq_2_iommu[irq].irte_index = index; | 133 | irq_iommu->iommu = iommu; |
105 | irq_2_iommu[irq].sub_handle = 0; | 134 | irq_iommu->irte_index = index; |
106 | irq_2_iommu[irq].irte_mask = mask; | 135 | irq_iommu->sub_handle = 0; |
136 | irq_iommu->irte_mask = mask; | ||
107 | 137 | ||
108 | spin_unlock(&irq_2_ir_lock); | 138 | spin_unlock(&irq_2_ir_lock); |
109 | 139 | ||
@@ -124,31 +154,33 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask) | |||
124 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) | 154 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) |
125 | { | 155 | { |
126 | int index; | 156 | int index; |
157 | struct irq_2_iommu *irq_iommu; | ||
127 | 158 | ||
128 | spin_lock(&irq_2_ir_lock); | 159 | spin_lock(&irq_2_ir_lock); |
129 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | 160 | irq_iommu = valid_irq_2_iommu(irq); |
161 | if (!irq_iommu) { | ||
130 | spin_unlock(&irq_2_ir_lock); | 162 | spin_unlock(&irq_2_ir_lock); |
131 | return -1; | 163 | return -1; |
132 | } | 164 | } |
133 | 165 | ||
134 | *sub_handle = irq_2_iommu[irq].sub_handle; | 166 | *sub_handle = irq_iommu->sub_handle; |
135 | index = irq_2_iommu[irq].irte_index; | 167 | index = irq_iommu->irte_index; |
136 | spin_unlock(&irq_2_ir_lock); | 168 | spin_unlock(&irq_2_ir_lock); |
137 | return index; | 169 | return index; |
138 | } | 170 | } |
139 | 171 | ||
140 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | 172 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
141 | { | 173 | { |
174 | struct irq_2_iommu *irq_iommu; | ||
175 | |||
142 | spin_lock(&irq_2_ir_lock); | 176 | spin_lock(&irq_2_ir_lock); |
143 | if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) { | ||
144 | spin_unlock(&irq_2_ir_lock); | ||
145 | return -1; | ||
146 | } | ||
147 | 177 | ||
148 | irq_2_iommu[irq].iommu = iommu; | 178 | irq_iommu = irq_2_iommu_alloc(irq); |
149 | irq_2_iommu[irq].irte_index = index; | 179 | |
150 | irq_2_iommu[irq].sub_handle = subhandle; | 180 | irq_iommu->iommu = iommu; |
151 | irq_2_iommu[irq].irte_mask = 0; | 181 | irq_iommu->irte_index = index; |
182 | irq_iommu->sub_handle = subhandle; | ||
183 | irq_iommu->irte_mask = 0; | ||
152 | 184 | ||
153 | spin_unlock(&irq_2_ir_lock); | 185 | spin_unlock(&irq_2_ir_lock); |
154 | 186 | ||
@@ -157,16 +189,19 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
157 | 189 | ||
158 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | 190 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) |
159 | { | 191 | { |
192 | struct irq_2_iommu *irq_iommu; | ||
193 | |||
160 | spin_lock(&irq_2_ir_lock); | 194 | spin_lock(&irq_2_ir_lock); |
161 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | 195 | irq_iommu = valid_irq_2_iommu(irq); |
196 | if (!irq_iommu) { | ||
162 | spin_unlock(&irq_2_ir_lock); | 197 | spin_unlock(&irq_2_ir_lock); |
163 | return -1; | 198 | return -1; |
164 | } | 199 | } |
165 | 200 | ||
166 | irq_2_iommu[irq].iommu = NULL; | 201 | irq_iommu->iommu = NULL; |
167 | irq_2_iommu[irq].irte_index = 0; | 202 | irq_iommu->irte_index = 0; |
168 | irq_2_iommu[irq].sub_handle = 0; | 203 | irq_iommu->sub_handle = 0; |
169 | irq_2_iommu[irq].irte_mask = 0; | 204 | irq_2_iommu(irq)->irte_mask = 0; |
170 | 205 | ||
171 | spin_unlock(&irq_2_ir_lock); | 206 | spin_unlock(&irq_2_ir_lock); |
172 | 207 | ||
@@ -178,16 +213,18 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
178 | int index; | 213 | int index; |
179 | struct irte *irte; | 214 | struct irte *irte; |
180 | struct intel_iommu *iommu; | 215 | struct intel_iommu *iommu; |
216 | struct irq_2_iommu *irq_iommu; | ||
181 | 217 | ||
182 | spin_lock(&irq_2_ir_lock); | 218 | spin_lock(&irq_2_ir_lock); |
183 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | 219 | irq_iommu = valid_irq_2_iommu(irq); |
220 | if (!irq_iommu) { | ||
184 | spin_unlock(&irq_2_ir_lock); | 221 | spin_unlock(&irq_2_ir_lock); |
185 | return -1; | 222 | return -1; |
186 | } | 223 | } |
187 | 224 | ||
188 | iommu = irq_2_iommu[irq].iommu; | 225 | iommu = irq_iommu->iommu; |
189 | 226 | ||
190 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | 227 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
191 | irte = &iommu->ir_table->base[index]; | 228 | irte = &iommu->ir_table->base[index]; |
192 | 229 | ||
193 | set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); | 230 | set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); |
@@ -203,18 +240,20 @@ int flush_irte(int irq) | |||
203 | { | 240 | { |
204 | int index; | 241 | int index; |
205 | struct intel_iommu *iommu; | 242 | struct intel_iommu *iommu; |
243 | struct irq_2_iommu *irq_iommu; | ||
206 | 244 | ||
207 | spin_lock(&irq_2_ir_lock); | 245 | spin_lock(&irq_2_ir_lock); |
208 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | 246 | irq_iommu = valid_irq_2_iommu(irq); |
247 | if (!irq_iommu) { | ||
209 | spin_unlock(&irq_2_ir_lock); | 248 | spin_unlock(&irq_2_ir_lock); |
210 | return -1; | 249 | return -1; |
211 | } | 250 | } |
212 | 251 | ||
213 | iommu = irq_2_iommu[irq].iommu; | 252 | iommu = irq_iommu->iommu; |
214 | 253 | ||
215 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | 254 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
216 | 255 | ||
217 | qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask); | 256 | qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
218 | spin_unlock(&irq_2_ir_lock); | 257 | spin_unlock(&irq_2_ir_lock); |
219 | 258 | ||
220 | return 0; | 259 | return 0; |
@@ -246,28 +285,30 @@ int free_irte(int irq) | |||
246 | int index, i; | 285 | int index, i; |
247 | struct irte *irte; | 286 | struct irte *irte; |
248 | struct intel_iommu *iommu; | 287 | struct intel_iommu *iommu; |
288 | struct irq_2_iommu *irq_iommu; | ||
249 | 289 | ||
250 | spin_lock(&irq_2_ir_lock); | 290 | spin_lock(&irq_2_ir_lock); |
251 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | 291 | irq_iommu = valid_irq_2_iommu(irq); |
292 | if (!irq_iommu) { | ||
252 | spin_unlock(&irq_2_ir_lock); | 293 | spin_unlock(&irq_2_ir_lock); |
253 | return -1; | 294 | return -1; |
254 | } | 295 | } |
255 | 296 | ||
256 | iommu = irq_2_iommu[irq].iommu; | 297 | iommu = irq_iommu->iommu; |
257 | 298 | ||
258 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | 299 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
259 | irte = &iommu->ir_table->base[index]; | 300 | irte = &iommu->ir_table->base[index]; |
260 | 301 | ||
261 | if (!irq_2_iommu[irq].sub_handle) { | 302 | if (!irq_iommu->sub_handle) { |
262 | for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++) | 303 | for (i = 0; i < (1 << irq_iommu->irte_mask); i++) |
263 | set_64bit((unsigned long *)irte, 0); | 304 | set_64bit((unsigned long *)irte, 0); |
264 | qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask); | 305 | qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
265 | } | 306 | } |
266 | 307 | ||
267 | irq_2_iommu[irq].iommu = NULL; | 308 | irq_iommu->iommu = NULL; |
268 | irq_2_iommu[irq].irte_index = 0; | 309 | irq_iommu->irte_index = 0; |
269 | irq_2_iommu[irq].sub_handle = 0; | 310 | irq_iommu->sub_handle = 0; |
270 | irq_2_iommu[irq].irte_mask = 0; | 311 | irq_iommu->irte_mask = 0; |
271 | 312 | ||
272 | spin_unlock(&irq_2_ir_lock); | 313 | spin_unlock(&irq_2_ir_lock); |
273 | 314 | ||
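The intr_remapping.c rework replaces the open-coded "irq >= NR_IRQS || !irq_2_iommu[irq].iommu" checks with a valid_irq_2_iommu() helper that bounds-checks against nr_irqs and confirms an IOMMU is attached. Every consumer then follows the same lock, look up, bail-or-use shape; a condensed illustration of that calling pattern (names as in the patch, body illustrative):

static int example_irte_index(int irq)
{
	struct irq_2_iommu *irq_iommu;
	int index;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);	/* NULL if out of range or unmapped */
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}
	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	spin_unlock(&irq_2_ir_lock);
	return index;
}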
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 4a10b5624f72..d2812013fd22 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -378,23 +378,21 @@ static int msi_capability_init(struct pci_dev *dev) | |||
378 | entry->msi_attrib.masked = 1; | 378 | entry->msi_attrib.masked = 1; |
379 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ | 379 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ |
380 | entry->msi_attrib.pos = pos; | 380 | entry->msi_attrib.pos = pos; |
381 | if (is_mask_bit_support(control)) { | 381 | if (entry->msi_attrib.maskbit) { |
382 | entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, | 382 | entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, |
383 | is_64bit_address(control)); | 383 | entry->msi_attrib.is_64); |
384 | } | 384 | } |
385 | entry->dev = dev; | 385 | entry->dev = dev; |
386 | if (entry->msi_attrib.maskbit) { | 386 | if (entry->msi_attrib.maskbit) { |
387 | unsigned int maskbits, temp; | 387 | unsigned int maskbits, temp; |
388 | /* All MSIs are unmasked by default, Mask them all */ | 388 | /* All MSIs are unmasked by default, Mask them all */ |
389 | pci_read_config_dword(dev, | 389 | pci_read_config_dword(dev, |
390 | msi_mask_bits_reg(pos, is_64bit_address(control)), | 390 | msi_mask_bits_reg(pos, entry->msi_attrib.is_64), |
391 | &maskbits); | 391 | &maskbits); |
392 | temp = (1 << multi_msi_capable(control)); | 392 | temp = (1 << multi_msi_capable(control)); |
393 | temp = ((temp - 1) & ~temp); | 393 | temp = ((temp - 1) & ~temp); |
394 | maskbits |= temp; | 394 | maskbits |= temp; |
395 | pci_write_config_dword(dev, | 395 | pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); |
396 | msi_mask_bits_reg(pos, is_64bit_address(control)), | ||
397 | maskbits); | ||
398 | entry->msi_attrib.maskbits_mask = temp; | 396 | entry->msi_attrib.maskbits_mask = temp; |
399 | } | 397 | } |
400 | list_add_tail(&entry->list, &dev->msi_list); | 398 | list_add_tail(&entry->list, &dev->msi_list); |
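The msi.c hunk reuses the is_64 and maskbit flags already cached in entry->msi_attrib instead of re-decoding the capability control word with is_64bit_address()/is_mask_bit_support(). The mask-all-vectors arithmetic is easier to follow with the intermediate values named; a hedged restatement, assuming multi_msi_capable() yields the number of vectors the function advertises:

	unsigned int nvec = multi_msi_capable(control);	/* 1, 2, 4, ... 32 */
	unsigned int all_masked = (1 << nvec) - 1;	/* one mask bit per vector */

	maskbits |= all_masked;		/* every vector starts out masked */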
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a13f53486114..b4cdd690ae71 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -43,18 +43,32 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) | |||
43 | { | 43 | { |
44 | struct pci_dynid *dynid; | 44 | struct pci_dynid *dynid; |
45 | struct pci_driver *pdrv = to_pci_driver(driver); | 45 | struct pci_driver *pdrv = to_pci_driver(driver); |
46 | const struct pci_device_id *ids = pdrv->id_table; | ||
46 | __u32 vendor, device, subvendor=PCI_ANY_ID, | 47 | __u32 vendor, device, subvendor=PCI_ANY_ID, |
47 | subdevice=PCI_ANY_ID, class=0, class_mask=0; | 48 | subdevice=PCI_ANY_ID, class=0, class_mask=0; |
48 | unsigned long driver_data=0; | 49 | unsigned long driver_data=0; |
49 | int fields=0; | 50 | int fields=0; |
50 | int retval = 0; | 51 | int retval; |
51 | 52 | ||
52 | fields = sscanf(buf, "%x %x %x %x %x %x %lux", | 53 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", |
53 | &vendor, &device, &subvendor, &subdevice, | 54 | &vendor, &device, &subvendor, &subdevice, |
54 | &class, &class_mask, &driver_data); | 55 | &class, &class_mask, &driver_data); |
55 | if (fields < 2) | 56 | if (fields < 2) |
56 | return -EINVAL; | 57 | return -EINVAL; |
57 | 58 | ||
59 | /* Only accept driver_data values that match an existing id_table | ||
60 | entry */ | ||
61 | retval = -EINVAL; | ||
62 | while (ids->vendor || ids->subvendor || ids->class_mask) { | ||
63 | if (driver_data == ids->driver_data) { | ||
64 | retval = 0; | ||
65 | break; | ||
66 | } | ||
67 | ids++; | ||
68 | } | ||
69 | if (retval) /* No match */ | ||
70 | return retval; | ||
71 | |||
58 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | 72 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); |
59 | if (!dynid) | 73 | if (!dynid) |
60 | return -ENOMEM; | 74 | return -ENOMEM; |
@@ -65,8 +79,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) | |||
65 | dynid->id.subdevice = subdevice; | 79 | dynid->id.subdevice = subdevice; |
66 | dynid->id.class = class; | 80 | dynid->id.class = class; |
67 | dynid->id.class_mask = class_mask; | 81 | dynid->id.class_mask = class_mask; |
68 | dynid->id.driver_data = pdrv->dynids.use_driver_data ? | 82 | dynid->id.driver_data = driver_data; |
69 | driver_data : 0UL; | ||
70 | 83 | ||
71 | spin_lock(&pdrv->dynids.lock); | 84 | spin_lock(&pdrv->dynids.lock); |
72 | list_add_tail(&dynid->node, &pdrv->dynids.list); | 85 | list_add_tail(&dynid->node, &pdrv->dynids.list); |
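For illustration, here is a minimal sketch of the kind of static ID table the new check walks; the mydrv name, device IDs and MY_BOARD_* values are hypothetical and not part of this patch. Only driver_data values that already appear in such a table are now accepted through the dynamic-ID interface.

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Hypothetical board identifiers carried in driver_data. */
    enum { MY_BOARD_A = 1, MY_BOARD_B = 2 };

    static const struct pci_device_id mydrv_ids[] = {
            { PCI_DEVICE(0x8086, 0x1234), .driver_data = MY_BOARD_A },
            { PCI_DEVICE(0x8086, 0x5678), .driver_data = MY_BOARD_B },
            { 0, }  /* all-zero sentinel ends the validation walk above */
    };
    MODULE_DEVICE_TABLE(pci, mydrv_ids);

With such a table, writing "8086 abcd 0 0 0 0 2" to /sys/bus/pci/drivers/mydrv/new_id is accepted because the seventh field matches MY_BOARD_B, while a driver_data value that matches no table entry now fails with -EINVAL.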
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 77baff022f71..110022d78689 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -423,7 +423,7 @@ pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
423 | * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific | 423 | * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific |
424 | * callback routine (pci_legacy_read). | 424 | * callback routine (pci_legacy_read). |
425 | */ | 425 | */ |
426 | ssize_t | 426 | static ssize_t |
427 | pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | 427 | pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, |
428 | char *buf, loff_t off, size_t count) | 428 | char *buf, loff_t off, size_t count) |
429 | { | 429 | { |
@@ -448,7 +448,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
448 | * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific | 448 | * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific |
449 | * callback routine (pci_legacy_write). | 449 | * callback routine (pci_legacy_write). |
450 | */ | 450 | */ |
451 | ssize_t | 451 | static ssize_t |
452 | pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | 452 | pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, |
453 | char *buf, loff_t off, size_t count) | 453 | char *buf, loff_t off, size_t count) |
454 | { | 454 | { |
@@ -468,11 +468,11 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
468 | * @attr: struct bin_attribute for this file | 468 | * @attr: struct bin_attribute for this file |
469 | * @vma: struct vm_area_struct passed to mmap | 469 | * @vma: struct vm_area_struct passed to mmap |
470 | * | 470 | * |
471 | * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap | 471 | * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap |
472 | * legacy memory space (first meg of bus space) into application virtual | 472 | * legacy memory space (first meg of bus space) into application virtual |
473 | * memory space. | 473 | * memory space. |
474 | */ | 474 | */ |
475 | int | 475 | static int |
476 | pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, | 476 | pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, |
477 | struct vm_area_struct *vma) | 477 | struct vm_area_struct *vma) |
478 | { | 478 | { |
@@ -480,7 +480,90 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, | |||
480 | struct device, | 480 | struct device, |
481 | kobj)); | 481 | kobj)); |
482 | 482 | ||
483 | return pci_mmap_legacy_page_range(bus, vma); | 483 | return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem); |
484 | } | ||
485 | |||
486 | /** | ||
487 | * pci_mmap_legacy_io - map legacy PCI IO into user memory space | ||
488 | * @kobj: kobject corresponding to device to be mapped | ||
489 | * @attr: struct bin_attribute for this file | ||
490 | * @vma: struct vm_area_struct passed to mmap | ||
491 | * | ||
492 | * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap | ||
493 | * legacy IO space (first meg of bus space) into application virtual | ||
494 | * memory space. Returns -ENOSYS if the operation isn't supported. | ||
495 | */ | ||
496 | static int | ||
497 | pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr, | ||
498 | struct vm_area_struct *vma) | ||
499 | { | ||
500 | struct pci_bus *bus = to_pci_bus(container_of(kobj, | ||
501 | struct device, | ||
502 | kobj)); | ||
503 | |||
504 | return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io); | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * pci_create_legacy_files - create legacy I/O port and memory files | ||
509 | * @b: bus to create files under | ||
510 | * | ||
511 | * Some platforms allow access to legacy I/O port and ISA memory space on | ||
512 | * a per-bus basis. This routine creates the files and ties them into | ||
513 | * their associated read, write and mmap files from pci-sysfs.c | ||
514 | * | ||
515 | * On error unwind, but don't propagate the error to the caller | ||
516 | * as it is ok to set up the PCI bus without these files. | ||
517 | */ | ||
518 | void pci_create_legacy_files(struct pci_bus *b) | ||
519 | { | ||
520 | int error; | ||
521 | |||
522 | b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, | ||
523 | GFP_ATOMIC); | ||
524 | if (!b->legacy_io) | ||
525 | goto kzalloc_err; | ||
526 | |||
527 | b->legacy_io->attr.name = "legacy_io"; | ||
528 | b->legacy_io->size = 0xffff; | ||
529 | b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; | ||
530 | b->legacy_io->read = pci_read_legacy_io; | ||
531 | b->legacy_io->write = pci_write_legacy_io; | ||
532 | b->legacy_io->mmap = pci_mmap_legacy_io; | ||
533 | error = device_create_bin_file(&b->dev, b->legacy_io); | ||
534 | if (error) | ||
535 | goto legacy_io_err; | ||
536 | |||
537 | /* Allocated above after the legacy_io struct */ | ||
538 | b->legacy_mem = b->legacy_io + 1; | ||
539 | b->legacy_mem->attr.name = "legacy_mem"; | ||
540 | b->legacy_mem->size = 1024*1024; | ||
541 | b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; | ||
542 | b->legacy_mem->mmap = pci_mmap_legacy_mem; | ||
543 | error = device_create_bin_file(&b->dev, b->legacy_mem); | ||
544 | if (error) | ||
545 | goto legacy_mem_err; | ||
546 | |||
547 | return; | ||
548 | |||
549 | legacy_mem_err: | ||
550 | device_remove_bin_file(&b->dev, b->legacy_io); | ||
551 | legacy_io_err: | ||
552 | kfree(b->legacy_io); | ||
553 | b->legacy_io = NULL; | ||
554 | kzalloc_err: | ||
555 | printk(KERN_WARNING "pci: warning: could not create legacy I/O port " | ||
556 | "and ISA memory resources to sysfs\n"); | ||
557 | return; | ||
558 | } | ||
559 | |||
560 | void pci_remove_legacy_files(struct pci_bus *b) | ||
561 | { | ||
562 | if (b->legacy_io) { | ||
563 | device_remove_bin_file(&b->dev, b->legacy_io); | ||
564 | device_remove_bin_file(&b->dev, b->legacy_mem); | ||
565 | kfree(b->legacy_io); /* both are allocated here */ | ||
566 | } | ||
484 | } | 567 | } |
485 | #endif /* HAVE_PCI_LEGACY */ | 568 | #endif /* HAVE_PCI_LEGACY */ |
486 | 569 | ||
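The legacy_io and legacy_mem attributes created above are ordinary sysfs binary files, so on platforms that define HAVE_PCI_LEGACY user space can map the first megabyte of bus space directly. A rough sketch with an illustrative bus address; whether the mmap succeeds depends on the arch's pci_mmap_legacy_page_range() implementation and on privileges.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* Bus address 0000:00 is only an example. */
            int fd = open("/sys/class/pci_bus/0000:00/legacy_mem", O_RDONLY);
            void *p;

            if (fd < 0) {
                    perror("open legacy_mem");
                    return 1;
            }
            /* Map the 64 KB VGA window at 0xa0000 inside the 1 MB legacy area. */
            p = mmap(NULL, 0x10000, PROT_READ, MAP_SHARED, fd, 0xa0000);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    close(fd);
                    return 1;
            }
            printf("first byte of legacy VGA memory: 0x%02x\n",
                   *(unsigned char *)p);
            munmap(p, 0x10000);
            close(fd);
            return 0;
    }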
@@ -715,7 +798,7 @@ static struct bin_attribute pci_config_attr = { | |||
715 | .name = "config", | 798 | .name = "config", |
716 | .mode = S_IRUGO | S_IWUSR, | 799 | .mode = S_IRUGO | S_IWUSR, |
717 | }, | 800 | }, |
718 | .size = 256, | 801 | .size = PCI_CFG_SPACE_SIZE, |
719 | .read = pci_read_config, | 802 | .read = pci_read_config, |
720 | .write = pci_write_config, | 803 | .write = pci_write_config, |
721 | }; | 804 | }; |
@@ -725,7 +808,7 @@ static struct bin_attribute pcie_config_attr = { | |||
725 | .name = "config", | 808 | .name = "config", |
726 | .mode = S_IRUGO | S_IWUSR, | 809 | .mode = S_IRUGO | S_IWUSR, |
727 | }, | 810 | }, |
728 | .size = 4096, | 811 | .size = PCI_CFG_SPACE_EXP_SIZE, |
729 | .read = pci_read_config, | 812 | .read = pci_read_config, |
730 | .write = pci_write_config, | 813 | .write = pci_write_config, |
731 | }; | 814 | }; |
@@ -735,86 +818,103 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev) | |||
735 | return 0; | 818 | return 0; |
736 | } | 819 | } |
737 | 820 | ||
821 | static int pci_create_capabilities_sysfs(struct pci_dev *dev) | ||
822 | { | ||
823 | int retval; | ||
824 | struct bin_attribute *attr; | ||
825 | |||
826 | /* If the device has VPD, try to expose it in sysfs. */ | ||
827 | if (dev->vpd) { | ||
828 | attr = kzalloc(sizeof(*attr), GFP_ATOMIC); | ||
829 | if (!attr) | ||
830 | return -ENOMEM; | ||
831 | |||
832 | attr->size = dev->vpd->len; | ||
833 | attr->attr.name = "vpd"; | ||
834 | attr->attr.mode = S_IRUSR | S_IWUSR; | ||
835 | attr->read = pci_read_vpd; | ||
836 | attr->write = pci_write_vpd; | ||
837 | retval = sysfs_create_bin_file(&dev->dev.kobj, attr); | ||
838 | if (retval) { | ||
839 | kfree(dev->vpd->attr); | ||
840 | return retval; | ||
841 | } | ||
842 | dev->vpd->attr = attr; | ||
843 | } | ||
844 | |||
845 | /* Active State Power Management */ | ||
846 | pcie_aspm_create_sysfs_dev_files(dev); | ||
847 | |||
848 | return 0; | ||
849 | } | ||
850 | |||
738 | int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) | 851 | int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) |
739 | { | 852 | { |
740 | struct bin_attribute *attr = NULL; | ||
741 | int retval; | 853 | int retval; |
854 | int rom_size = 0; | ||
855 | struct bin_attribute *attr; | ||
742 | 856 | ||
743 | if (!sysfs_initialized) | 857 | if (!sysfs_initialized) |
744 | return -EACCES; | 858 | return -EACCES; |
745 | 859 | ||
746 | if (pdev->cfg_size < 4096) | 860 | if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) |
747 | retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); | 861 | retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); |
748 | else | 862 | else |
749 | retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); | 863 | retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); |
750 | if (retval) | 864 | if (retval) |
751 | goto err; | 865 | goto err; |
752 | 866 | ||
753 | /* If the device has VPD, try to expose it in sysfs. */ | ||
754 | if (pdev->vpd) { | ||
755 | attr = kzalloc(sizeof(*attr), GFP_ATOMIC); | ||
756 | if (attr) { | ||
757 | pdev->vpd->attr = attr; | ||
758 | attr->size = pdev->vpd->len; | ||
759 | attr->attr.name = "vpd"; | ||
760 | attr->attr.mode = S_IRUSR | S_IWUSR; | ||
761 | attr->read = pci_read_vpd; | ||
762 | attr->write = pci_write_vpd; | ||
763 | retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); | ||
764 | if (retval) | ||
765 | goto err_vpd; | ||
766 | } else { | ||
767 | retval = -ENOMEM; | ||
768 | goto err_config_file; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | retval = pci_create_resource_files(pdev); | 867 | retval = pci_create_resource_files(pdev); |
773 | if (retval) | 868 | if (retval) |
774 | goto err_vpd_file; | 869 | goto err_config_file; |
870 | |||
871 | if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) | ||
872 | rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
873 | else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) | ||
874 | rom_size = 0x20000; | ||
775 | 875 | ||
776 | /* If the device has a ROM, try to expose it in sysfs. */ | 876 | /* If the device has a ROM, try to expose it in sysfs. */ |
777 | if (pci_resource_len(pdev, PCI_ROM_RESOURCE) || | 877 | if (rom_size) { |
778 | (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)) { | ||
779 | attr = kzalloc(sizeof(*attr), GFP_ATOMIC); | 878 | attr = kzalloc(sizeof(*attr), GFP_ATOMIC); |
780 | if (attr) { | 879 | if (!attr) { |
781 | pdev->rom_attr = attr; | ||
782 | attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
783 | attr->attr.name = "rom"; | ||
784 | attr->attr.mode = S_IRUSR; | ||
785 | attr->read = pci_read_rom; | ||
786 | attr->write = pci_write_rom; | ||
787 | retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); | ||
788 | if (retval) | ||
789 | goto err_rom; | ||
790 | } else { | ||
791 | retval = -ENOMEM; | 880 | retval = -ENOMEM; |
792 | goto err_resource_files; | 881 | goto err_resource_files; |
793 | } | 882 | } |
883 | attr->size = rom_size; | ||
884 | attr->attr.name = "rom"; | ||
885 | attr->attr.mode = S_IRUSR; | ||
886 | attr->read = pci_read_rom; | ||
887 | attr->write = pci_write_rom; | ||
888 | retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); | ||
889 | if (retval) { | ||
890 | kfree(attr); | ||
891 | goto err_resource_files; | ||
892 | } | ||
893 | pdev->rom_attr = attr; | ||
794 | } | 894 | } |
895 | |||
795 | /* add platform-specific attributes */ | 896 | /* add platform-specific attributes */ |
796 | if (pcibios_add_platform_entries(pdev)) | 897 | retval = pcibios_add_platform_entries(pdev); |
898 | if (retval) | ||
797 | goto err_rom_file; | 899 | goto err_rom_file; |
798 | 900 | ||
799 | pcie_aspm_create_sysfs_dev_files(pdev); | 901 | /* add sysfs entries for various capabilities */ |
902 | retval = pci_create_capabilities_sysfs(pdev); | ||
903 | if (retval) | ||
904 | goto err_rom_file; | ||
800 | 905 | ||
801 | return 0; | 906 | return 0; |
802 | 907 | ||
803 | err_rom_file: | 908 | err_rom_file: |
804 | if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) | 909 | if (rom_size) { |
805 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); | 910 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); |
806 | err_rom: | 911 | kfree(pdev->rom_attr); |
807 | kfree(pdev->rom_attr); | 912 | pdev->rom_attr = NULL; |
913 | } | ||
808 | err_resource_files: | 914 | err_resource_files: |
809 | pci_remove_resource_files(pdev); | 915 | pci_remove_resource_files(pdev); |
810 | err_vpd_file: | ||
811 | if (pdev->vpd) { | ||
812 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr); | ||
813 | err_vpd: | ||
814 | kfree(pdev->vpd->attr); | ||
815 | } | ||
816 | err_config_file: | 916 | err_config_file: |
817 | if (pdev->cfg_size < 4096) | 917 | if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) |
818 | sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); | 918 | sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); |
819 | else | 919 | else |
820 | sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); | 920 | sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); |
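The vpd attribute set up in pci_create_capabilities_sysfs() above is likewise a plain binary file under the device's sysfs directory. A small user-space sketch of reading it; the device address is made up, and the first byte shown is simply the leading VPD resource tag.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char buf[64];
            ssize_t n;
            int fd = open("/sys/bus/pci/devices/0000:01:00.0/vpd", O_RDONLY);

            if (fd < 0) {
                    perror("open vpd");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf));
            if (n > 0)
                    printf("read %zd bytes, first tag 0x%02x\n", n, buf[0]);
            close(fd);
            return 0;
    }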
@@ -822,6 +922,16 @@ err: | |||
822 | return retval; | 922 | return retval; |
823 | } | 923 | } |
824 | 924 | ||
925 | static void pci_remove_capabilities_sysfs(struct pci_dev *dev) | ||
926 | { | ||
927 | if (dev->vpd && dev->vpd->attr) { | ||
928 | sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr); | ||
929 | kfree(dev->vpd->attr); | ||
930 | } | ||
931 | |||
932 | pcie_aspm_remove_sysfs_dev_files(dev); | ||
933 | } | ||
934 | |||
825 | /** | 935 | /** |
826 | * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files | 936 | * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files |
827 | * @pdev: device whose entries we should free | 937 | * @pdev: device whose entries we should free |
@@ -830,27 +940,28 @@ err: | |||
830 | */ | 940 | */ |
831 | void pci_remove_sysfs_dev_files(struct pci_dev *pdev) | 941 | void pci_remove_sysfs_dev_files(struct pci_dev *pdev) |
832 | { | 942 | { |
943 | int rom_size = 0; | ||
944 | |||
833 | if (!sysfs_initialized) | 945 | if (!sysfs_initialized) |
834 | return; | 946 | return; |
835 | 947 | ||
836 | pcie_aspm_remove_sysfs_dev_files(pdev); | 948 | pci_remove_capabilities_sysfs(pdev); |
837 | 949 | ||
838 | if (pdev->vpd) { | 950 | if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) |
839 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr); | ||
840 | kfree(pdev->vpd->attr); | ||
841 | } | ||
842 | if (pdev->cfg_size < 4096) | ||
843 | sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); | 951 | sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); |
844 | else | 952 | else |
845 | sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); | 953 | sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); |
846 | 954 | ||
847 | pci_remove_resource_files(pdev); | 955 | pci_remove_resource_files(pdev); |
848 | 956 | ||
849 | if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) { | 957 | if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) |
850 | if (pdev->rom_attr) { | 958 | rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
851 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); | 959 | else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) |
852 | kfree(pdev->rom_attr); | 960 | rom_size = 0x20000; |
853 | } | 961 | |
962 | if (rom_size && pdev->rom_attr) { | ||
963 | sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); | ||
964 | kfree(pdev->rom_attr); | ||
854 | } | 965 | } |
855 | } | 966 | } |
856 | 967 | ||
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index dbe9f39f4436..4db261e13e69 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -213,10 +213,13 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) | |||
213 | int pci_find_ext_capability(struct pci_dev *dev, int cap) | 213 | int pci_find_ext_capability(struct pci_dev *dev, int cap) |
214 | { | 214 | { |
215 | u32 header; | 215 | u32 header; |
216 | int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ | 216 | int ttl; |
217 | int pos = 0x100; | 217 | int pos = PCI_CFG_SPACE_SIZE; |
218 | 218 | ||
219 | if (dev->cfg_size <= 256) | 219 | /* minimum 8 bytes per capability */ |
220 | ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; | ||
221 | |||
222 | if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) | ||
220 | return 0; | 223 | return 0; |
221 | 224 | ||
222 | if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) | 225 | if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) |
@@ -234,7 +237,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap) | |||
234 | return pos; | 237 | return pos; |
235 | 238 | ||
236 | pos = PCI_EXT_CAP_NEXT(header); | 239 | pos = PCI_EXT_CAP_NEXT(header); |
237 | if (pos < 0x100) | 240 | if (pos < PCI_CFG_SPACE_SIZE) |
238 | break; | 241 | break; |
239 | 242 | ||
240 | if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) | 243 | if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) |
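With the 256/4096-byte sizes now coming from pci.h, pci_find_ext_capability() is the single helper for walking PCIe extended capabilities, and later hunks convert the AER code to call it directly. A sketch of the usual call pattern, not taken from the patch:

    /* Dump the AER uncorrectable status of a PCIe device, if it has AER. */
    static void dump_aer_uncorrectable(struct pci_dev *dev)
    {
            int pos;
            u32 status;

            pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
            if (!pos)
                    return;         /* no AER extended capability */

            pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
            dev_info(&dev->dev, "AER uncorrectable status: %#010x\n", status);
    }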
@@ -1127,6 +1130,27 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) | |||
1127 | } | 1130 | } |
1128 | 1131 | ||
1129 | /** | 1132 | /** |
1133 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold | ||
1134 | * @dev: PCI device to prepare | ||
1135 | * @enable: True to enable wake-up event generation; false to disable | ||
1136 | * | ||
1137 | * Many drivers want the device to wake up the system from D3_hot or D3_cold | ||
1138 | * and this function allows them to set that up cleanly - pci_enable_wake() | ||
1139 | * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI | ||
1140 | * ordering constraints. | ||
1141 | * | ||
1142 | * This function only returns an error code if the device is not capable of | ||
1143 | * generating PME# from both D3_hot and D3_cold, and the platform is unable to | ||
1144 | * enable wake-up power for it. | ||
1145 | */ | ||
1146 | int pci_wake_from_d3(struct pci_dev *dev, bool enable) | ||
1147 | { | ||
1148 | return pci_pme_capable(dev, PCI_D3cold) ? | ||
1149 | pci_enable_wake(dev, PCI_D3cold, enable) : | ||
1150 | pci_enable_wake(dev, PCI_D3hot, enable); | ||
1151 | } | ||
1152 | |||
1153 | /** | ||
1130 | * pci_target_state - find an appropriate low power state for a given PCI dev | 1154 | * pci_target_state - find an appropriate low power state for a given PCI dev |
1131 | * @dev: PCI device | 1155 | * @dev: PCI device |
1132 | * | 1156 | * |
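A sketch of how a driver's legacy suspend hook might use the new pci_wake_from_d3() helper; mydrv_suspend and the surrounding sequence are illustrative, not part of this patch.

    static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            /* Arm PME# so the device can wake the system from D3hot, or from
             * D3cold as well when the device supports it. */
            pci_wake_from_d3(pdev, true);

            pci_save_state(pdev);
            pci_disable_device(pdev);
            pci_set_power_state(pdev, pci_choose_state(pdev, state));
            return 0;
    }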
@@ -1242,25 +1266,25 @@ void pci_pm_init(struct pci_dev *dev) | |||
1242 | dev->d1_support = false; | 1266 | dev->d1_support = false; |
1243 | dev->d2_support = false; | 1267 | dev->d2_support = false; |
1244 | if (!pci_no_d1d2(dev)) { | 1268 | if (!pci_no_d1d2(dev)) { |
1245 | if (pmc & PCI_PM_CAP_D1) { | 1269 | if (pmc & PCI_PM_CAP_D1) |
1246 | dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n"); | ||
1247 | dev->d1_support = true; | 1270 | dev->d1_support = true; |
1248 | } | 1271 | if (pmc & PCI_PM_CAP_D2) |
1249 | if (pmc & PCI_PM_CAP_D2) { | ||
1250 | dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n"); | ||
1251 | dev->d2_support = true; | 1272 | dev->d2_support = true; |
1252 | } | 1273 | |
1274 | if (dev->d1_support || dev->d2_support) | ||
1275 | dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", | ||
1276 | dev->d1_support ? " D1" : "", | ||
1277 | dev->d2_support ? " D2" : ""); | ||
1253 | } | 1278 | } |
1254 | 1279 | ||
1255 | pmc &= PCI_PM_CAP_PME_MASK; | 1280 | pmc &= PCI_PM_CAP_PME_MASK; |
1256 | if (pmc) { | 1281 | if (pmc) { |
1257 | dev_printk(KERN_INFO, &dev->dev, | 1282 | dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", |
1258 | "PME# supported from%s%s%s%s%s\n", | 1283 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", |
1259 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", | 1284 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", |
1260 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", | 1285 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", |
1261 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", | 1286 | (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", |
1262 | (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", | 1287 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); |
1263 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); | ||
1264 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; | 1288 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; |
1265 | /* | 1289 | /* |
1266 | * Make device's PM flags reflect the wake-up capability, but | 1290 | * Make device's PM flags reflect the wake-up capability, but |
@@ -1275,6 +1299,38 @@ void pci_pm_init(struct pci_dev *dev) | |||
1275 | } | 1299 | } |
1276 | } | 1300 | } |
1277 | 1301 | ||
1302 | /** | ||
1303 | * pci_enable_ari - enable ARI forwarding if hardware supports it | ||
1304 | * @dev: the PCI device | ||
1305 | */ | ||
1306 | void pci_enable_ari(struct pci_dev *dev) | ||
1307 | { | ||
1308 | int pos; | ||
1309 | u32 cap; | ||
1310 | u16 ctrl; | ||
1311 | |||
1312 | if (!dev->is_pcie) | ||
1313 | return; | ||
1314 | |||
1315 | if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | ||
1316 | dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | ||
1317 | return; | ||
1318 | |||
1319 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
1320 | if (!pos) | ||
1321 | return; | ||
1322 | |||
1323 | pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); | ||
1324 | if (!(cap & PCI_EXP_DEVCAP2_ARI)) | ||
1325 | return; | ||
1326 | |||
1327 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); | ||
1328 | ctrl |= PCI_EXP_DEVCTL2_ARI; | ||
1329 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); | ||
1330 | |||
1331 | dev->ari_enabled = 1; | ||
1332 | } | ||
1333 | |||
1278 | int | 1334 | int |
1279 | pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) | 1335 | pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) |
1280 | { | 1336 | { |
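pci_enable_ari() only sets the forwarding bit on a root or downstream port; consumers are expected to check pci_ari_enabled(), added to pci.h later in this patch. A purely hypothetical sketch of how enumeration code could use it, since ARI merges the 5-bit device and 3-bit function fields into a single 8-bit function number behind the port:

    /* Hypothetical helper: next function number to probe behind a port. */
    static int next_fn(struct pci_dev *port, int fn)
    {
            int max = pci_ari_enabled(port) ? 255 : 7;

            return fn < max ? fn + 1 : -1;
    }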
@@ -1942,6 +1998,7 @@ EXPORT_SYMBOL(pci_restore_state); | |||
1942 | EXPORT_SYMBOL(pci_pme_capable); | 1998 | EXPORT_SYMBOL(pci_pme_capable); |
1943 | EXPORT_SYMBOL(pci_pme_active); | 1999 | EXPORT_SYMBOL(pci_pme_active); |
1944 | EXPORT_SYMBOL(pci_enable_wake); | 2000 | EXPORT_SYMBOL(pci_enable_wake); |
2001 | EXPORT_SYMBOL(pci_wake_from_d3); | ||
1945 | EXPORT_SYMBOL(pci_target_state); | 2002 | EXPORT_SYMBOL(pci_target_state); |
1946 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 2003 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
1947 | EXPORT_SYMBOL(pci_back_from_sleep); | 2004 | EXPORT_SYMBOL(pci_back_from_sleep); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d807cd786f20..b205ab866a1d 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -1,3 +1,9 @@ | |||
1 | #ifndef DRIVERS_PCI_H | ||
2 | #define DRIVERS_PCI_H | ||
3 | |||
4 | #define PCI_CFG_SPACE_SIZE 256 | ||
5 | #define PCI_CFG_SPACE_EXP_SIZE 4096 | ||
6 | |||
1 | /* Functions internal to the PCI core code */ | 7 | /* Functions internal to the PCI core code */ |
2 | 8 | ||
3 | extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); | 9 | extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); |
@@ -76,7 +82,13 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; } | |||
76 | /* Functions for PCI Hotplug drivers to use */ | 82 | /* Functions for PCI Hotplug drivers to use */ |
77 | extern unsigned int pci_do_scan_bus(struct pci_bus *bus); | 83 | extern unsigned int pci_do_scan_bus(struct pci_bus *bus); |
78 | 84 | ||
85 | #ifdef HAVE_PCI_LEGACY | ||
86 | extern void pci_create_legacy_files(struct pci_bus *bus); | ||
79 | extern void pci_remove_legacy_files(struct pci_bus *bus); | 87 | extern void pci_remove_legacy_files(struct pci_bus *bus); |
88 | #else | ||
89 | static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } | ||
90 | static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; } | ||
91 | #endif | ||
80 | 92 | ||
81 | /* Lock for read/write access to pci device and bus lists */ | 93 | /* Lock for read/write access to pci device and bus lists */ |
82 | extern struct rw_semaphore pci_bus_sem; | 94 | extern struct rw_semaphore pci_bus_sem; |
@@ -109,6 +121,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev) | |||
109 | extern int pcie_mch_quirk; | 121 | extern int pcie_mch_quirk; |
110 | extern struct device_attribute pci_dev_attrs[]; | 122 | extern struct device_attribute pci_dev_attrs[]; |
111 | extern struct device_attribute dev_attr_cpuaffinity; | 123 | extern struct device_attribute dev_attr_cpuaffinity; |
124 | extern struct device_attribute dev_attr_cpulistaffinity; | ||
112 | 125 | ||
113 | /** | 126 | /** |
114 | * pci_match_one_device - Tell if a PCI device structure has a matching | 127 | * pci_match_one_device - Tell if a PCI device structure has a matching |
@@ -144,3 +157,16 @@ struct pci_slot_attribute { | |||
144 | }; | 157 | }; |
145 | #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) | 158 | #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) |
146 | 159 | ||
160 | extern void pci_enable_ari(struct pci_dev *dev); | ||
161 | /** | ||
162 | * pci_ari_enabled - query ARI forwarding status | ||
163 | * @dev: the PCI device | ||
164 | * | ||
165 | * Returns 1 if ARI forwarding is enabled, or 0 if not enabled. | ||
166 | */ | ||
167 | static inline int pci_ari_enabled(struct pci_dev *dev) | ||
168 | { | ||
169 | return dev->ari_enabled; | ||
170 | } | ||
171 | |||
172 | #endif /* DRIVERS_PCI_H */ | ||
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 77036f46acfe..e390707661dd 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -105,7 +105,7 @@ static irqreturn_t aer_irq(int irq, void *context) | |||
105 | unsigned long flags; | 105 | unsigned long flags; |
106 | int pos; | 106 | int pos; |
107 | 107 | ||
108 | pos = pci_find_aer_capability(pdev->port); | 108 | pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR); |
109 | /* | 109 | /* |
110 | * Must lock access to Root Error Status Reg, Root Error ID Reg, | 110 | * Must lock access to Root Error Status Reg, Root Error ID Reg, |
111 | * and Root error producer/consumer index | 111 | * and Root error producer/consumer index |
@@ -252,7 +252,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | |||
252 | u32 status; | 252 | u32 status; |
253 | int pos; | 253 | int pos; |
254 | 254 | ||
255 | pos = pci_find_aer_capability(dev); | 255 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
256 | 256 | ||
257 | /* Disable Root's interrupt in response to error messages */ | 257 | /* Disable Root's interrupt in response to error messages */ |
258 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); | 258 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); |
@@ -316,7 +316,7 @@ static void aer_error_resume(struct pci_dev *dev) | |||
316 | pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); | 316 | pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); |
317 | 317 | ||
318 | /* Clean AER Root Error Status */ | 318 | /* Clean AER Root Error Status */ |
319 | pos = pci_find_aer_capability(dev); | 319 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
320 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); | 320 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); |
321 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); | 321 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); |
322 | if (dev->error_state == pci_channel_io_normal) | 322 | if (dev->error_state == pci_channel_io_normal) |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index ee5e7b5176d0..dfc63d01f20a 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -28,41 +28,15 @@ | |||
28 | static int forceload; | 28 | static int forceload; |
29 | module_param(forceload, bool, 0); | 29 | module_param(forceload, bool, 0); |
30 | 30 | ||
31 | #define PCI_CFG_SPACE_SIZE (0x100) | ||
32 | int pci_find_aer_capability(struct pci_dev *dev) | ||
33 | { | ||
34 | int pos; | ||
35 | u32 reg32 = 0; | ||
36 | |||
37 | /* Check if it's a pci-express device */ | ||
38 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
39 | if (!pos) | ||
40 | return 0; | ||
41 | |||
42 | /* Check if it supports pci-express AER */ | ||
43 | pos = PCI_CFG_SPACE_SIZE; | ||
44 | while (pos) { | ||
45 | if (pci_read_config_dword(dev, pos, ®32)) | ||
46 | return 0; | ||
47 | |||
48 | /* some broken boards return ~0 */ | ||
49 | if (reg32 == 0xffffffff) | ||
50 | return 0; | ||
51 | |||
52 | if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR) | ||
53 | break; | ||
54 | |||
55 | pos = reg32 >> 20; | ||
56 | } | ||
57 | |||
58 | return pos; | ||
59 | } | ||
60 | |||
61 | int pci_enable_pcie_error_reporting(struct pci_dev *dev) | 31 | int pci_enable_pcie_error_reporting(struct pci_dev *dev) |
62 | { | 32 | { |
63 | u16 reg16 = 0; | 33 | u16 reg16 = 0; |
64 | int pos; | 34 | int pos; |
65 | 35 | ||
36 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
37 | if (!pos) | ||
38 | return -EIO; | ||
39 | |||
66 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 40 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); |
67 | if (!pos) | 41 | if (!pos) |
68 | return -EIO; | 42 | return -EIO; |
@@ -102,7 +76,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | |||
102 | int pos; | 76 | int pos; |
103 | u32 status, mask; | 77 | u32 status, mask; |
104 | 78 | ||
105 | pos = pci_find_aer_capability(dev); | 79 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
106 | if (!pos) | 80 | if (!pos) |
107 | return -EIO; | 81 | return -EIO; |
108 | 82 | ||
@@ -123,7 +97,7 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | |||
123 | int pos; | 97 | int pos; |
124 | u32 status; | 98 | u32 status; |
125 | 99 | ||
126 | pos = pci_find_aer_capability(dev); | 100 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
127 | if (!pos) | 101 | if (!pos) |
128 | return -EIO; | 102 | return -EIO; |
129 | 103 | ||
@@ -502,7 +476,7 @@ static void handle_error_source(struct pcie_device * aerdev, | |||
502 | * Correctable error does not need software intervention. | 476 |
503 | * No need to go through error recovery process. | 477 | * No need to go through error recovery process. |
504 | */ | 478 | */ |
505 | pos = pci_find_aer_capability(dev); | 479 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
506 | if (pos) | 480 | if (pos) |
507 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, | 481 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, |
508 | info.status); | 482 | info.status); |
@@ -542,7 +516,7 @@ void aer_enable_rootport(struct aer_rpc *rpc) | |||
542 | reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); | 516 | reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); |
543 | pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); | 517 | pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); |
544 | 518 | ||
545 | aer_pos = pci_find_aer_capability(pdev); | 519 | aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); |
546 | /* Clear error status */ | 520 | /* Clear error status */ |
547 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); | 521 | pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); |
548 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); | 522 | pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); |
@@ -579,7 +553,7 @@ static void disable_root_aer(struct aer_rpc *rpc) | |||
579 | u32 reg32; | 553 | u32 reg32; |
580 | int pos; | 554 | int pos; |
581 | 555 | ||
582 | pos = pci_find_aer_capability(pdev); | 556 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); |
583 | /* Disable Root's interrupt in response to error messages */ | 557 | /* Disable Root's interrupt in response to error messages */ |
584 | pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); | 558 | pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); |
585 | 559 | ||
@@ -618,7 +592,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | |||
618 | { | 592 | { |
619 | int pos; | 593 | int pos; |
620 | 594 | ||
621 | pos = pci_find_aer_capability(dev); | 595 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
622 | 596 | ||
623 | /* The device might not support AER */ | 597 | /* The device might not support AER */ |
624 | if (!pos) | 598 | if (!pos) |
@@ -755,7 +729,6 @@ int aer_init(struct pcie_device *dev) | |||
755 | return AER_SUCCESS; | 729 | return AER_SUCCESS; |
756 | } | 730 | } |
757 | 731 | ||
758 | EXPORT_SYMBOL_GPL(pci_find_aer_capability); | ||
759 | EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); | 732 | EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); |
760 | EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); | 733 | EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); |
761 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | 734 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 851f5b83cdbc..8f63f4c6b85f 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -528,9 +528,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
528 | pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, | 528 | pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, |
529 | ®32); | 529 | ®32); |
530 | if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { | 530 | if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { |
531 | printk("Pre-1.1 PCIe device detected, " | 531 | dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM" |
532 | "disable ASPM for %s. It can be enabled forcedly" | 532 | " on pre-1.1 PCIe device. You can enable it" |
533 | " with 'pcie_aspm=force'\n", pci_name(pdev)); | 533 | " with 'pcie_aspm=force'\n"); |
534 | return -EINVAL; | 534 | return -EINVAL; |
535 | } | 535 | } |
536 | } | 536 | } |
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 3656e0349dd1..2529f3f2ea5a 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #define PCIE_CAPABILITIES_REG 0x2 | 25 | #define PCIE_CAPABILITIES_REG 0x2 |
26 | #define PCIE_SLOT_CAPABILITIES_REG 0x14 | 26 | #define PCIE_SLOT_CAPABILITIES_REG 0x14 |
27 | #define PCIE_PORT_DEVICE_MAXSERVICES 4 | 27 | #define PCIE_PORT_DEVICE_MAXSERVICES 4 |
28 | #define PCI_CFG_SPACE_SIZE 256 | ||
29 | 28 | ||
30 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) | 29 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) |
31 | 30 | ||
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 890f0d2b370a..2e091e014829 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -195,24 +195,11 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
195 | /* PME Capable - root port capability */ | 195 | /* PME Capable - root port capability */ |
196 | if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT) | 196 | if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT) |
197 | services |= PCIE_PORT_SERVICE_PME; | 197 | services |= PCIE_PORT_SERVICE_PME; |
198 | 198 | ||
199 | pos = PCI_CFG_SPACE_SIZE; | 199 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) |
200 | while (pos) { | 200 | services |= PCIE_PORT_SERVICE_AER; |
201 | pci_read_config_dword(dev, pos, ®32); | 201 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) |
202 | switch (reg32 & 0xffff) { | 202 | services |= PCIE_PORT_SERVICE_VC; |
203 | case PCI_EXT_CAP_ID_ERR: | ||
204 | services |= PCIE_PORT_SERVICE_AER; | ||
205 | pos = reg32 >> 20; | ||
206 | break; | ||
207 | case PCI_EXT_CAP_ID_VC: | ||
208 | services |= PCIE_PORT_SERVICE_VC; | ||
209 | pos = reg32 >> 20; | ||
210 | break; | ||
211 | default: | ||
212 | pos = 0; | ||
213 | break; | ||
214 | } | ||
215 | } | ||
216 | 203 | ||
217 | return services; | 204 | return services; |
218 | } | 205 | } |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 367c9c20000d..584422da8d8b 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, | |||
91 | 91 | ||
92 | pci_set_master(dev); | 92 | pci_set_master(dev); |
93 | if (!dev->irq && dev->pin) { | 93 | if (!dev->irq && dev->pin) { |
94 | dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " | 94 | dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " |
95 | "check vendor BIOS\n", dev->vendor, dev->device); | 95 | "check vendor BIOS\n", dev->vendor, dev->device); |
96 | } | 96 | } |
97 | if (pcie_port_device_register(dev)) { | 97 | if (pcie_port_device_register(dev)) { |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index d3db8b249729..aaaf0a1fed22 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -14,8 +14,6 @@ | |||
14 | 14 | ||
15 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ | 15 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ |
16 | #define CARDBUS_RESERVE_BUSNR 3 | 16 | #define CARDBUS_RESERVE_BUSNR 3 |
17 | #define PCI_CFG_SPACE_SIZE 256 | ||
18 | #define PCI_CFG_SPACE_EXP_SIZE 4096 | ||
19 | 17 | ||
20 | /* Ugh. Need to stop exporting this to modules. */ | 18 | /* Ugh. Need to stop exporting this to modules. */ |
21 | LIST_HEAD(pci_root_buses); | 19 | LIST_HEAD(pci_root_buses); |
@@ -44,72 +42,6 @@ int no_pci_devices(void) | |||
44 | } | 42 | } |
45 | EXPORT_SYMBOL(no_pci_devices); | 43 | EXPORT_SYMBOL(no_pci_devices); |
46 | 44 | ||
47 | #ifdef HAVE_PCI_LEGACY | ||
48 | /** | ||
49 | * pci_create_legacy_files - create legacy I/O port and memory files | ||
50 | * @b: bus to create files under | ||
51 | * | ||
52 | * Some platforms allow access to legacy I/O port and ISA memory space on | ||
53 | * a per-bus basis. This routine creates the files and ties them into | ||
54 | * their associated read, write and mmap files from pci-sysfs.c | ||
55 | * | ||
56 | * On error unwind, but don't propogate the error to the caller | ||
57 | * as it is ok to set up the PCI bus without these files. | ||
58 | */ | ||
59 | static void pci_create_legacy_files(struct pci_bus *b) | ||
60 | { | ||
61 | int error; | ||
62 | |||
63 | b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, | ||
64 | GFP_ATOMIC); | ||
65 | if (!b->legacy_io) | ||
66 | goto kzalloc_err; | ||
67 | |||
68 | b->legacy_io->attr.name = "legacy_io"; | ||
69 | b->legacy_io->size = 0xffff; | ||
70 | b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; | ||
71 | b->legacy_io->read = pci_read_legacy_io; | ||
72 | b->legacy_io->write = pci_write_legacy_io; | ||
73 | error = device_create_bin_file(&b->dev, b->legacy_io); | ||
74 | if (error) | ||
75 | goto legacy_io_err; | ||
76 | |||
77 | /* Allocated above after the legacy_io struct */ | ||
78 | b->legacy_mem = b->legacy_io + 1; | ||
79 | b->legacy_mem->attr.name = "legacy_mem"; | ||
80 | b->legacy_mem->size = 1024*1024; | ||
81 | b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; | ||
82 | b->legacy_mem->mmap = pci_mmap_legacy_mem; | ||
83 | error = device_create_bin_file(&b->dev, b->legacy_mem); | ||
84 | if (error) | ||
85 | goto legacy_mem_err; | ||
86 | |||
87 | return; | ||
88 | |||
89 | legacy_mem_err: | ||
90 | device_remove_bin_file(&b->dev, b->legacy_io); | ||
91 | legacy_io_err: | ||
92 | kfree(b->legacy_io); | ||
93 | b->legacy_io = NULL; | ||
94 | kzalloc_err: | ||
95 | printk(KERN_WARNING "pci: warning: could not create legacy I/O port " | ||
96 | "and ISA memory resources to sysfs\n"); | ||
97 | return; | ||
98 | } | ||
99 | |||
100 | void pci_remove_legacy_files(struct pci_bus *b) | ||
101 | { | ||
102 | if (b->legacy_io) { | ||
103 | device_remove_bin_file(&b->dev, b->legacy_io); | ||
104 | device_remove_bin_file(&b->dev, b->legacy_mem); | ||
105 | kfree(b->legacy_io); /* both are allocated here */ | ||
106 | } | ||
107 | } | ||
108 | #else /* !HAVE_PCI_LEGACY */ | ||
109 | static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } | ||
110 | void pci_remove_legacy_files(struct pci_bus *bus) { return; } | ||
111 | #endif /* HAVE_PCI_LEGACY */ | ||
112 | |||
113 | /* | 45 | /* |
114 | * PCI Bus Class Devices | 46 | * PCI Bus Class Devices |
115 | */ | 47 | */ |
@@ -219,7 +151,7 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) | |||
219 | 151 | ||
220 | res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; | 152 | res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; |
221 | 153 | ||
222 | if (res->flags == PCI_BASE_ADDRESS_MEM_TYPE_64) | 154 | if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) |
223 | return pci_bar_mem64; | 155 | return pci_bar_mem64; |
224 | return pci_bar_mem32; | 156 | return pci_bar_mem32; |
225 | } | 157 | } |
@@ -304,8 +236,8 @@ static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
304 | } else { | 236 | } else { |
305 | res->start = l64; | 237 | res->start = l64; |
306 | res->end = l64 + sz64; | 238 | res->end = l64 + sz64; |
307 | printk(KERN_DEBUG "PCI: %s reg %x 64bit mmio: %pR\n", | 239 | dev_printk(KERN_DEBUG, &dev->dev, |
308 | pci_name(dev), pos, res); | 240 | "reg %x 64bit mmio: %pR\n", pos, res); |
309 | } | 241 | } |
310 | } else { | 242 | } else { |
311 | sz = pci_size(l, sz, mask); | 243 | sz = pci_size(l, sz, mask); |
@@ -315,10 +247,10 @@ static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
315 | 247 | ||
316 | res->start = l; | 248 | res->start = l; |
317 | res->end = l + sz; | 249 | res->end = l + sz; |
318 | printk(KERN_DEBUG "PCI: %s reg %x %s: %pR\n", | 250 | |
319 | pci_name(dev), pos, | 251 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, |
320 | (res->flags & IORESOURCE_IO) ? "io port":"32bit mmio", | 252 | (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio", |
321 | res); | 253 | res); |
322 | } | 254 | } |
323 | 255 | ||
324 | out: | 256 | out: |
@@ -389,8 +321,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
389 | res->start = base; | 321 | res->start = base; |
390 | if (!res->end) | 322 | if (!res->end) |
391 | res->end = limit + 0xfff; | 323 | res->end = limit + 0xfff; |
392 | printk(KERN_DEBUG "PCI: bridge %s io port: %pR\n", | 324 | dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res); |
393 | pci_name(dev), res); | ||
394 | } | 325 | } |
395 | 326 | ||
396 | res = child->resource[1]; | 327 | res = child->resource[1]; |
@@ -402,8 +333,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
402 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 333 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
403 | res->start = base; | 334 | res->start = base; |
404 | res->end = limit + 0xfffff; | 335 | res->end = limit + 0xfffff; |
405 | printk(KERN_DEBUG "PCI: bridge %s 32bit mmio: %pR\n", | 336 | dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n", |
406 | pci_name(dev), res); | 337 | res); |
407 | } | 338 | } |
408 | 339 | ||
409 | res = child->resource[2]; | 340 | res = child->resource[2]; |
@@ -439,9 +370,9 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
439 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; | 370 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; |
440 | res->start = base; | 371 | res->start = base; |
441 | res->end = limit + 0xfffff; | 372 | res->end = limit + 0xfffff; |
442 | printk(KERN_DEBUG "PCI: bridge %s %sbit mmio pref: %pR\n", | 373 | dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", |
443 | pci_name(dev), | 374 | (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32", |
444 | (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64":"32", res); | 375 | res); |
445 | } | 376 | } |
446 | } | 377 | } |
447 | 378 | ||
@@ -762,7 +693,7 @@ static int pci_setup_device(struct pci_dev * dev) | |||
762 | dev->class = class; | 693 | dev->class = class; |
763 | class >>= 8; | 694 | class >>= 8; |
764 | 695 | ||
765 | dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", | 696 | dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", |
766 | dev->vendor, dev->device, class, dev->hdr_type); | 697 | dev->vendor, dev->device, class, dev->hdr_type); |
767 | 698 | ||
768 | /* "Unknown power state" */ | 699 | /* "Unknown power state" */ |
@@ -844,6 +775,11 @@ static int pci_setup_device(struct pci_dev * dev) | |||
844 | return 0; | 775 | return 0; |
845 | } | 776 | } |
846 | 777 | ||
778 | static void pci_release_capabilities(struct pci_dev *dev) | ||
779 | { | ||
780 | pci_vpd_release(dev); | ||
781 | } | ||
782 | |||
847 | /** | 783 | /** |
848 | * pci_release_dev - free a pci device structure when all users of it are finished. | 784 | * pci_release_dev - free a pci device structure when all users of it are finished. |
849 | * @dev: device that's been disconnected | 785 | * @dev: device that's been disconnected |
@@ -856,7 +792,7 @@ static void pci_release_dev(struct device *dev) | |||
856 | struct pci_dev *pci_dev; | 792 | struct pci_dev *pci_dev; |
857 | 793 | ||
858 | pci_dev = to_pci_dev(dev); | 794 | pci_dev = to_pci_dev(dev); |
859 | pci_vpd_release(pci_dev); | 795 | pci_release_capabilities(pci_dev); |
860 | kfree(pci_dev); | 796 | kfree(pci_dev); |
861 | } | 797 | } |
862 | 798 | ||
@@ -887,8 +823,9 @@ static void set_pcie_port_type(struct pci_dev *pdev) | |||
887 | int pci_cfg_space_size_ext(struct pci_dev *dev) | 823 | int pci_cfg_space_size_ext(struct pci_dev *dev) |
888 | { | 824 | { |
889 | u32 status; | 825 | u32 status; |
826 | int pos = PCI_CFG_SPACE_SIZE; | ||
890 | 827 | ||
891 | if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) | 828 | if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) |
892 | goto fail; | 829 | goto fail; |
893 | if (status == 0xffffffff) | 830 | if (status == 0xffffffff) |
894 | goto fail; | 831 | goto fail; |
@@ -936,8 +873,6 @@ struct pci_dev *alloc_pci_dev(void) | |||
936 | 873 | ||
937 | INIT_LIST_HEAD(&dev->bus_list); | 874 | INIT_LIST_HEAD(&dev->bus_list); |
938 | 875 | ||
939 | pci_msi_init_pci_dev(dev); | ||
940 | |||
941 | return dev; | 876 | return dev; |
942 | } | 877 | } |
943 | EXPORT_SYMBOL(alloc_pci_dev); | 878 | EXPORT_SYMBOL(alloc_pci_dev); |
@@ -949,6 +884,7 @@ EXPORT_SYMBOL(alloc_pci_dev); | |||
949 | static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) | 884 | static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) |
950 | { | 885 | { |
951 | struct pci_dev *dev; | 886 | struct pci_dev *dev; |
887 | struct pci_slot *slot; | ||
952 | u32 l; | 888 | u32 l; |
953 | u8 hdr_type; | 889 | u8 hdr_type; |
954 | int delay = 1; | 890 | int delay = 1; |
@@ -997,6 +933,10 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) | |||
997 | dev->error_state = pci_channel_io_normal; | 933 | dev->error_state = pci_channel_io_normal; |
998 | set_pcie_port_type(dev); | 934 | set_pcie_port_type(dev); |
999 | 935 | ||
936 | list_for_each_entry(slot, &bus->slots, list) | ||
937 | if (PCI_SLOT(devfn) == slot->number) | ||
938 | dev->slot = slot; | ||
939 | |||
1000 | /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) | 940 | /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) |
1001 | set this higher, assuming the system even supports it. */ | 941 | set this higher, assuming the system even supports it. */ |
1002 | dev->dma_mask = 0xffffffff; | 942 | dev->dma_mask = 0xffffffff; |
@@ -1005,9 +945,22 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) | |||
1005 | return NULL; | 945 | return NULL; |
1006 | } | 946 | } |
1007 | 947 | ||
948 | return dev; | ||
949 | } | ||
950 | |||
951 | static void pci_init_capabilities(struct pci_dev *dev) | ||
952 | { | ||
953 | /* MSI/MSI-X list */ | ||
954 | pci_msi_init_pci_dev(dev); | ||
955 | |||
956 | /* Power Management */ | ||
957 | pci_pm_init(dev); | ||
958 | |||
959 | /* Vital Product Data */ | ||
1008 | pci_vpd_pci22_init(dev); | 960 | pci_vpd_pci22_init(dev); |
1009 | 961 | ||
1010 | return dev; | 962 | /* Alternative Routing-ID Forwarding */ |
963 | pci_enable_ari(dev); | ||
1011 | } | 964 | } |
1012 | 965 | ||
1013 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | 966 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) |
@@ -1026,8 +979,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
1026 | /* Fix up broken headers */ | 979 | /* Fix up broken headers */ |
1027 | pci_fixup_device(pci_fixup_header, dev); | 980 | pci_fixup_device(pci_fixup_header, dev); |
1028 | 981 | ||
1029 | /* Initialize power management of the device */ | 982 | /* Initialize various capabilities */ |
1030 | pci_pm_init(dev); | 983 | pci_init_capabilities(dev); |
1031 | 984 | ||
1032 | /* | 985 | /* |
1033 | * Add the device to our list of discovered devices | 986 | * Add the device to our list of discovered devices |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 832175d9ca25..96cf8ecd04ce 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -24,6 +24,14 @@ | |||
24 | #include <linux/kallsyms.h> | 24 | #include <linux/kallsyms.h> |
25 | #include "pci.h" | 25 | #include "pci.h" |
26 | 26 | ||
27 | int isa_dma_bridge_buggy; | ||
28 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
29 | int pci_pci_problems; | ||
30 | EXPORT_SYMBOL(pci_pci_problems); | ||
31 | int pcie_mch_quirk; | ||
32 | EXPORT_SYMBOL(pcie_mch_quirk); | ||
33 | |||
34 | #ifdef CONFIG_PCI_QUIRKS | ||
27 | /* The Mellanox Tavor device gives false positive parity errors | 35 | /* The Mellanox Tavor device gives false positive parity errors |
28 | * Mark this device with a broken_parity_status, to allow | 36 | * Mark this device with a broken_parity_status, to allow |
29 | * PCI scanning code to "skip" this now blacklisted device. | 37 | * PCI scanning code to "skip" this now blacklisted device. |
@@ -76,8 +84,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p | |||
76 | 84 | ||
77 | This appears to be BIOS not version dependent. So presumably there is a | 85 | This appears to be BIOS not version dependent. So presumably there is a |
78 | chipset level fix */ | 86 | chipset level fix */ |
79 | int isa_dma_bridge_buggy; | ||
80 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
81 | 87 | ||
82 | static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) | 88 | static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) |
83 | { | 89 | { |
@@ -98,9 +104,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_d | |||
98 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); | 104 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); |
99 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); | 105 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); |
100 | 106 | ||
101 | int pci_pci_problems; | ||
102 | EXPORT_SYMBOL(pci_pci_problems); | ||
103 | |||
104 | /* | 107 | /* |
105 | * Chipsets where PCI->PCI transfers vanish or hang | 108 | * Chipsets where PCI->PCI transfers vanish or hang |
106 | */ | 109 | */ |
@@ -1376,9 +1379,6 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev) | |||
1376 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); | 1379 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); |
1377 | #endif | 1380 | #endif |
1378 | 1381 | ||
1379 | int pcie_mch_quirk; | ||
1380 | EXPORT_SYMBOL(pcie_mch_quirk); | ||
1381 | |||
1382 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) | 1382 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) |
1383 | { | 1383 | { |
1384 | pcie_mch_quirk = 1; | 1384 | pcie_mch_quirk = 1; |
@@ -1569,84 +1569,6 @@ static void __devinit fixup_rev1_53c810(struct pci_dev* dev) | |||
1569 | } | 1569 | } |
1570 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); | 1570 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); |
1571 | 1571 | ||
1572 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) | ||
1573 | { | ||
1574 | while (f < end) { | ||
1575 | if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && | ||
1576 | (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { | ||
1577 | #ifdef DEBUG | ||
1578 | dev_dbg(&dev->dev, "calling %pF\n", f->hook); | ||
1579 | #endif | ||
1580 | f->hook(dev); | ||
1581 | } | ||
1582 | f++; | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | extern struct pci_fixup __start_pci_fixups_early[]; | ||
1587 | extern struct pci_fixup __end_pci_fixups_early[]; | ||
1588 | extern struct pci_fixup __start_pci_fixups_header[]; | ||
1589 | extern struct pci_fixup __end_pci_fixups_header[]; | ||
1590 | extern struct pci_fixup __start_pci_fixups_final[]; | ||
1591 | extern struct pci_fixup __end_pci_fixups_final[]; | ||
1592 | extern struct pci_fixup __start_pci_fixups_enable[]; | ||
1593 | extern struct pci_fixup __end_pci_fixups_enable[]; | ||
1594 | extern struct pci_fixup __start_pci_fixups_resume[]; | ||
1595 | extern struct pci_fixup __end_pci_fixups_resume[]; | ||
1596 | extern struct pci_fixup __start_pci_fixups_resume_early[]; | ||
1597 | extern struct pci_fixup __end_pci_fixups_resume_early[]; | ||
1598 | extern struct pci_fixup __start_pci_fixups_suspend[]; | ||
1599 | extern struct pci_fixup __end_pci_fixups_suspend[]; | ||
1600 | |||
1601 | |||
1602 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | ||
1603 | { | ||
1604 | struct pci_fixup *start, *end; | ||
1605 | |||
1606 | switch(pass) { | ||
1607 | case pci_fixup_early: | ||
1608 | start = __start_pci_fixups_early; | ||
1609 | end = __end_pci_fixups_early; | ||
1610 | break; | ||
1611 | |||
1612 | case pci_fixup_header: | ||
1613 | start = __start_pci_fixups_header; | ||
1614 | end = __end_pci_fixups_header; | ||
1615 | break; | ||
1616 | |||
1617 | case pci_fixup_final: | ||
1618 | start = __start_pci_fixups_final; | ||
1619 | end = __end_pci_fixups_final; | ||
1620 | break; | ||
1621 | |||
1622 | case pci_fixup_enable: | ||
1623 | start = __start_pci_fixups_enable; | ||
1624 | end = __end_pci_fixups_enable; | ||
1625 | break; | ||
1626 | |||
1627 | case pci_fixup_resume: | ||
1628 | start = __start_pci_fixups_resume; | ||
1629 | end = __end_pci_fixups_resume; | ||
1630 | break; | ||
1631 | |||
1632 | case pci_fixup_resume_early: | ||
1633 | start = __start_pci_fixups_resume_early; | ||
1634 | end = __end_pci_fixups_resume_early; | ||
1635 | break; | ||
1636 | |||
1637 | case pci_fixup_suspend: | ||
1638 | start = __start_pci_fixups_suspend; | ||
1639 | end = __end_pci_fixups_suspend; | ||
1640 | break; | ||
1641 | |||
1642 | default: | ||
1643 | /* stupid compiler warning, you would think with an enum... */ | ||
1644 | return; | ||
1645 | } | ||
1646 | pci_do_fixups(dev, start, end); | ||
1647 | } | ||
1648 | EXPORT_SYMBOL(pci_fixup_device); | ||
1649 | |||
1650 | /* Enable 1k I/O space granularity on the Intel P64H2 */ | 1572 | /* Enable 1k I/O space granularity on the Intel P64H2 */ |
1651 | static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) | 1573 | static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) |
1652 | { | 1574 | { |
@@ -2020,3 +1942,82 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, | |||
2020 | quirk_msi_intx_disable_bug); | 1942 | quirk_msi_intx_disable_bug); |
2021 | 1943 | ||
2022 | #endif /* CONFIG_PCI_MSI */ | 1944 | #endif /* CONFIG_PCI_MSI */ |
1945 | |||
1946 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) | ||
1947 | { | ||
1948 | while (f < end) { | ||
1949 | if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && | ||
1950 | (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { | ||
1951 | dev_dbg(&dev->dev, "calling %pF\n", f->hook); | ||
1952 | f->hook(dev); | ||
1953 | } | ||
1954 | f++; | ||
1955 | } | ||
1956 | } | ||
1957 | |||
1958 | extern struct pci_fixup __start_pci_fixups_early[]; | ||
1959 | extern struct pci_fixup __end_pci_fixups_early[]; | ||
1960 | extern struct pci_fixup __start_pci_fixups_header[]; | ||
1961 | extern struct pci_fixup __end_pci_fixups_header[]; | ||
1962 | extern struct pci_fixup __start_pci_fixups_final[]; | ||
1963 | extern struct pci_fixup __end_pci_fixups_final[]; | ||
1964 | extern struct pci_fixup __start_pci_fixups_enable[]; | ||
1965 | extern struct pci_fixup __end_pci_fixups_enable[]; | ||
1966 | extern struct pci_fixup __start_pci_fixups_resume[]; | ||
1967 | extern struct pci_fixup __end_pci_fixups_resume[]; | ||
1968 | extern struct pci_fixup __start_pci_fixups_resume_early[]; | ||
1969 | extern struct pci_fixup __end_pci_fixups_resume_early[]; | ||
1970 | extern struct pci_fixup __start_pci_fixups_suspend[]; | ||
1971 | extern struct pci_fixup __end_pci_fixups_suspend[]; | ||
1972 | |||
1973 | |||
1974 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | ||
1975 | { | ||
1976 | struct pci_fixup *start, *end; | ||
1977 | |||
1978 | switch(pass) { | ||
1979 | case pci_fixup_early: | ||
1980 | start = __start_pci_fixups_early; | ||
1981 | end = __end_pci_fixups_early; | ||
1982 | break; | ||
1983 | |||
1984 | case pci_fixup_header: | ||
1985 | start = __start_pci_fixups_header; | ||
1986 | end = __end_pci_fixups_header; | ||
1987 | break; | ||
1988 | |||
1989 | case pci_fixup_final: | ||
1990 | start = __start_pci_fixups_final; | ||
1991 | end = __end_pci_fixups_final; | ||
1992 | break; | ||
1993 | |||
1994 | case pci_fixup_enable: | ||
1995 | start = __start_pci_fixups_enable; | ||
1996 | end = __end_pci_fixups_enable; | ||
1997 | break; | ||
1998 | |||
1999 | case pci_fixup_resume: | ||
2000 | start = __start_pci_fixups_resume; | ||
2001 | end = __end_pci_fixups_resume; | ||
2002 | break; | ||
2003 | |||
2004 | case pci_fixup_resume_early: | ||
2005 | start = __start_pci_fixups_resume_early; | ||
2006 | end = __end_pci_fixups_resume_early; | ||
2007 | break; | ||
2008 | |||
2009 | case pci_fixup_suspend: | ||
2010 | start = __start_pci_fixups_suspend; | ||
2011 | end = __end_pci_fixups_suspend; | ||
2012 | break; | ||
2013 | |||
2014 | default: | ||
2015 | /* stupid compiler warning, you would think with an enum... */ | ||
2016 | return; | ||
2017 | } | ||
2018 | pci_do_fixups(dev, start, end); | ||
2019 | } | ||
2020 | #else | ||
2021 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} | ||
2022 | #endif | ||
2023 | EXPORT_SYMBOL(pci_fixup_device); | ||
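
Note: the quirks.c hunks above move pci_do_fixups()/pci_fixup_device() to the tail of the file and add an empty pci_fixup_device() stub for the configured-out case, keeping the single EXPORT_SYMBOL. The dispatch itself is a linear walk over linker-section-delimited fixup tables, matching vendor/device with PCI_ANY_ID wildcards. Below is a minimal userspace sketch of that matching walk; the struct layout, the names fixup/run_fixups/sample_hook, and the sample table are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xffff        /* stand-in for PCI_ANY_ID */

struct fixup {
        uint16_t vendor;
        uint16_t device;
        void (*hook)(void);
};

static void sample_hook(void) { printf("hook ran\n"); }

/* Walk [f, end) and call every hook whose vendor/device matches,
 * treating ANY_ID as a wildcard -- the same shape as pci_do_fixups(). */
static void run_fixups(uint16_t vendor, uint16_t device,
                       const struct fixup *f, const struct fixup *end)
{
        for (; f < end; f++)
                if ((f->vendor == vendor || f->vendor == ANY_ID) &&
                    (f->device == device || f->device == ANY_ID))
                        f->hook();
}

int main(void)
{
        /* In the kernel the table lives in dedicated linker sections
         * bounded by __start_pci_fixups_* / __end_pci_fixups_* symbols;
         * here a plain array stands in for it. */
        static const struct fixup table[] = {
                { 0x8086, ANY_ID, sample_hook },
        };

        run_fixups(0x8086, 0x1234, table, table + 1);
        return 0;
}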
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index bdc2a44d68e1..042e08924421 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c | |||
@@ -73,6 +73,7 @@ void pci_remove_bus(struct pci_bus *pci_bus) | |||
73 | up_write(&pci_bus_sem); | 73 | up_write(&pci_bus_sem); |
74 | pci_remove_legacy_files(pci_bus); | 74 | pci_remove_legacy_files(pci_bus); |
75 | device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); | 75 | device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); |
76 | device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); | ||
76 | device_unregister(&pci_bus->dev); | 77 | device_unregister(&pci_bus->dev); |
77 | } | 78 | } |
78 | EXPORT_SYMBOL(pci_remove_bus); | 79 | EXPORT_SYMBOL(pci_remove_bus); |
@@ -114,13 +115,9 @@ void pci_remove_behind_bridge(struct pci_dev *dev) | |||
114 | { | 115 | { |
115 | struct list_head *l, *n; | 116 | struct list_head *l, *n; |
116 | 117 | ||
117 | if (dev->subordinate) { | 118 | if (dev->subordinate) |
118 | list_for_each_safe(l, n, &dev->subordinate->devices) { | 119 | list_for_each_safe(l, n, &dev->subordinate->devices) |
119 | struct pci_dev *dev = pci_dev_b(l); | 120 | pci_remove_bus_device(pci_dev_b(l)); |
120 | |||
121 | pci_remove_bus_device(dev); | ||
122 | } | ||
123 | } | ||
124 | } | 121 | } |
125 | 122 | ||
126 | static void pci_stop_bus_devices(struct pci_bus *bus) | 123 | static void pci_stop_bus_devices(struct pci_bus *bus) |
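
Note: pci_remove_behind_bridge() collapses to a single safe walk; list_for_each_safe() caches the next pointer before each iteration body runs, so pci_remove_bus_device() may unlink the current entry without breaking the traversal. A small userspace sketch of the same idiom on a singly linked list follows; the node/remove_all names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

/* Delete every node while walking the list: grab 'next' before freeing
 * the current element, which is what list_for_each_safe() does with its
 * extra cursor variable. */
static void remove_all(struct node **head)
{
        struct node *cur = *head, *next;

        while (cur) {
                next = cur->next;       /* saved before cur is destroyed */
                printf("removing node %d\n", cur->id);
                free(cur);
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = head;
                head = n;
        }
        remove_all(&head);
        return 0;
}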
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 471a429d7a20..ea979f2bc6db 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -299,7 +299,7 @@ static void pbus_size_io(struct pci_bus *bus) | |||
299 | 299 | ||
300 | if (r->parent || !(r->flags & IORESOURCE_IO)) | 300 | if (r->parent || !(r->flags & IORESOURCE_IO)) |
301 | continue; | 301 | continue; |
302 | r_size = r->end - r->start + 1; | 302 | r_size = resource_size(r); |
303 | 303 | ||
304 | if (r_size < 0x400) | 304 | if (r_size < 0x400) |
305 | /* Might be re-aligned for ISA */ | 305 | /* Might be re-aligned for ISA */ |
@@ -350,7 +350,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long | |||
350 | 350 | ||
351 | if (r->parent || (r->flags & mask) != type) | 351 | if (r->parent || (r->flags & mask) != type) |
352 | continue; | 352 | continue; |
353 | r_size = r->end - r->start + 1; | 353 | r_size = resource_size(r); |
354 | /* For bridges size != alignment */ | 354 | /* For bridges size != alignment */ |
355 | align = resource_alignment(r); | 355 | align = resource_alignment(r); |
356 | order = __ffs(align) - 20; | 356 | order = __ffs(align) - 20; |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index d4b5c690eaa7..2dbd96cce2d8 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -129,7 +129,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
129 | resource_size_t size, min, align; | 129 | resource_size_t size, min, align; |
130 | int ret; | 130 | int ret; |
131 | 131 | ||
132 | size = res->end - res->start + 1; | 132 | size = resource_size(res); |
133 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; | 133 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; |
134 | 134 | ||
135 | align = resource_alignment(res); | 135 | align = resource_alignment(res); |
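
Note: both setup-bus.c and setup-res.c replace the open-coded "r->end - r->start + 1" with resource_size(), centralizing the inclusive-end arithmetic so the "+ 1" cannot be forgotten at call sites. A rough userspace equivalent is sketched below, with a stand-in resource type rather than the kernel's struct resource.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's struct resource; start/end are inclusive. */
struct resource {
        uint64_t start;
        uint64_t end;
};

/* Length of an inclusive [start, end] range -- the same idea as the
 * kernel helper, kept in one place. */
static inline uint64_t resource_size(const struct resource *r)
{
        return r->end - r->start + 1;
}

int main(void)
{
        struct resource io = { .start = 0x1000, .end = 0x10ff };

        printf("size = %llu bytes\n",
               (unsigned long long)resource_size(&io));    /* prints 256 */
        return 0;
}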
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 7e5b85cbd948..0c6db03698ea 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
@@ -49,11 +49,16 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) | |||
49 | 49 | ||
50 | static void pci_slot_release(struct kobject *kobj) | 50 | static void pci_slot_release(struct kobject *kobj) |
51 | { | 51 | { |
52 | struct pci_dev *dev; | ||
52 | struct pci_slot *slot = to_pci_slot(kobj); | 53 | struct pci_slot *slot = to_pci_slot(kobj); |
53 | 54 | ||
54 | pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, | 55 | pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, |
55 | slot->bus->number, slot->number); | 56 | slot->bus->number, slot->number); |
56 | 57 | ||
58 | list_for_each_entry(dev, &slot->bus->devices, bus_list) | ||
59 | if (PCI_SLOT(dev->devfn) == slot->number) | ||
60 | dev->slot = NULL; | ||
61 | |||
57 | list_del(&slot->list); | 62 | list_del(&slot->list); |
58 | 63 | ||
59 | kfree(slot); | 64 | kfree(slot); |
@@ -108,6 +113,7 @@ static struct kobj_type pci_slot_ktype = { | |||
108 | struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, | 113 | struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, |
109 | const char *name) | 114 | const char *name) |
110 | { | 115 | { |
116 | struct pci_dev *dev; | ||
111 | struct pci_slot *slot; | 117 | struct pci_slot *slot; |
112 | int err; | 118 | int err; |
113 | 119 | ||
@@ -150,6 +156,10 @@ placeholder: | |||
150 | INIT_LIST_HEAD(&slot->list); | 156 | INIT_LIST_HEAD(&slot->list); |
151 | list_add(&slot->list, &parent->slots); | 157 | list_add(&slot->list, &parent->slots); |
152 | 158 | ||
159 | list_for_each_entry(dev, &parent->devices, bus_list) | ||
160 | if (PCI_SLOT(dev->devfn) == slot_nr) | ||
161 | dev->slot = slot; | ||
162 | |||
153 | /* Don't care if debug printk has a -1 for slot_nr */ | 163 | /* Don't care if debug printk has a -1 for slot_nr */ |
154 | pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", | 164 | pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", |
155 | __func__, pci_domain_nr(parent), parent->number, slot_nr); | 165 | __func__, pci_domain_nr(parent), parent->number, slot_nr); |
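
Note: the slot.c hunks keep a back-pointer from each pci_dev to its pci_slot. pci_create_slot() points every device whose PCI_SLOT(devfn) matches at the new slot, and pci_slot_release() clears those pointers before the slot is freed, so no device is left referencing freed memory. A compressed userspace sketch of that attach/release pairing, with invented dev/slot types and helper names:

#include <stdio.h>
#include <stddef.h>

#define SLOT_OF(devfn)  ((devfn) >> 3)   /* same extraction as PCI_SLOT() */

struct slot {
        unsigned int nr;
};

struct dev {
        unsigned int devfn;
        struct slot *slot;               /* back-pointer, may be NULL */
};

/* On creation: attach every device with a matching slot number. */
static void slot_attach(struct slot *s, struct dev *devs, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (SLOT_OF(devs[i].devfn) == s->nr)
                        devs[i].slot = s;
}

/* On release: clear the back-pointers before the slot goes away. */
static void slot_release(struct slot *s, struct dev *devs, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (SLOT_OF(devs[i].devfn) == s->nr)
                        devs[i].slot = NULL;
}

int main(void)
{
        struct dev devs[] = { { 0x08, NULL }, { 0x10, NULL } };
        struct slot s = { .nr = 1 };

        slot_attach(&s, devs, 2);
        printf("dev0 attached: %s\n", devs[0].slot ? "yes" : "no");
        slot_release(&s, devs, 2);
        printf("dev0 attached: %s\n", devs[0].slot ? "yes" : "no");
        return 0;
}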