author		Len Brown <len.brown@intel.com>		2008-10-22 23:57:26 -0400
committer	Len Brown <len.brown@intel.com>		2008-10-23 00:11:07 -0400
commit		057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree		4333e608da237c73ff69b10878025cca96dcb4c8 /drivers/pci
parent		3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent		2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test
Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Makefile                     |    2
-rw-r--r--  drivers/pci/bus.c                        |    7
-rw-r--r--  drivers/pci/dmar.c                       |  430
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c        |   94
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c   |   14
-rw-r--r--  drivers/pci/hotplug/pciehp.h             |   16
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c        |   78
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c        |  136
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c         |  202
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c         |   26
-rw-r--r--  drivers/pci/hotplug/rpaphp.h             |    4
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c        |    4
-rw-r--r--  drivers/pci/hotplug/rpaphp_pci.c         |    2
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c        |    4
-rw-r--r--  drivers/pci/htirq.c                      |    3
-rw-r--r--  drivers/pci/intel-iommu.c                |  307
-rw-r--r--  drivers/pci/intel-iommu.h                |  344
-rw-r--r--  drivers/pci/intr_remapping.c             |  512
-rw-r--r--  drivers/pci/intr_remapping.h             |    8
-rw-r--r--  drivers/pci/iova.c                       |    2
-rw-r--r--  drivers/pci/iova.h                       |   52
-rw-r--r--  drivers/pci/msi.c                        |   10
-rw-r--r--  drivers/pci/pci-driver.c                 |   21
-rw-r--r--  drivers/pci/pci-sysfs.c                  |  241
-rw-r--r--  drivers/pci/pci.c                        |   96
-rw-r--r--  drivers/pci/pci.h                        |   26
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c            |    6
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c       |   47
-rw-r--r--  drivers/pci/pcie/aspm.c                  |    6
-rw-r--r--  drivers/pci/pcie/portdrv.h               |    1
-rw-r--r--  drivers/pci/pcie/portdrv_core.c          |   23
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c           |    2
-rw-r--r--  drivers/pci/probe.c                      |  185
-rw-r--r--  drivers/pci/quirks.c                     |  174
-rw-r--r--  drivers/pci/remove.c                     |   11
-rw-r--r--  drivers/pci/rom.c                        |    6
-rw-r--r--  drivers/pci/setup-bus.c                  |   26
-rw-r--r--  drivers/pci/setup-res.c                  |   42
-rw-r--r--  drivers/pci/slot.c                       |   10
39 files changed, 1872 insertions, 1308 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 7d63f8ced24..4b47f4ece5b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_HT_IRQ) += htirq.o
 # Build Intel IOMMU support
 obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
 
+obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
+
 #
 # Some architectures use the generic PCI setup functions
 #
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 529d9d7727b..999cc4088b5 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -151,6 +151,13 @@ void pci_bus_add_devices(struct pci_bus *bus)
 			if (retval)
 				dev_err(&dev->dev, "Error creating cpuaffinity"
 					" file, continuing...\n");
+
+			retval = device_create_file(&child_bus->dev,
+						&dev_attr_cpulistaffinity);
+			if (retval)
+				dev_err(&dev->dev,
+					"Error creating cpulistaffinity"
+					" file, continuing...\n");
 		}
 	}
 }
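
The hunk above registers a second sysfs attribute, cpulistaffinity, next to the existing cpuaffinity file on each child bus. A generic sketch of the device-attribute pattern being used (illustrative only; the real dev_attr_cpulistaffinity and its show function are defined elsewhere in the PCI core, and the returned string here is placeholder data):

	/* Hypothetical attribute definition mirroring the registration above. */
	static ssize_t cpulistaffinity_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
	{
		/* A real implementation would format the bus's CPU list. */
		return snprintf(buf, PAGE_SIZE, "0-3\n");
	}
	static DEVICE_ATTR(cpulistaffinity, S_IRUGO, cpulistaffinity_show, NULL);

device_create_file() then exposes it under the bus device's sysfs directory, which is exactly what the added lines do with the externally defined attribute.
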
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8bf86ae2333..8b29c307f1a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -19,15 +19,18 @@
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
- * This file implements early detection/parsing of DMA Remapping Devices
+ * This file implements early detection/parsing of Remapping Devices
  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
  * tables.
+ *
+ * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
 #include <linux/pci.h>
 #include <linux/dmar.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
+#include <linux/timer.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
@@ -37,7 +40,6 @@
  * these units are not supported by the architecture.
  */
 LIST_HEAD(dmar_drhd_units);
-LIST_HEAD(dmar_rmrr_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
 
@@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 	list_add(&drhd->list, &dmar_drhd_units);
 }
 
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
-	list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 					   struct pci_dev **dev, u16 segment)
 {
@@ -172,19 +169,37 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	struct acpi_dmar_hardware_unit *drhd;
 	struct dmar_drhd_unit *dmaru;
 	int ret = 0;
-	static int include_all;
 
 	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
 	if (!dmaru)
 		return -ENOMEM;
 
+	dmaru->hdr = header;
 	drhd = (struct acpi_dmar_hardware_unit *)header;
 	dmaru->reg_base_addr = drhd->address;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
+	ret = alloc_iommu(dmaru);
+	if (ret) {
+		kfree(dmaru);
+		return ret;
+	}
+	dmar_register_drhd_unit(dmaru);
+	return 0;
+}
+
+static int __init
+dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+{
+	struct acpi_dmar_hardware_unit *drhd;
+	static int include_all;
+	int ret = 0;
+
+	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
+
 	if (!dmaru->include_all)
 		ret = dmar_parse_dev_scope((void *)(drhd + 1),
-				((void *)drhd) + header->length,
+				((void *)drhd) + drhd->header.length,
 				&dmaru->devices_cnt, &dmaru->devices,
 				drhd->segment);
 	else {
@@ -197,37 +212,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 		include_all = 1;
 	}
 
-	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
+	if (ret) {
+		list_del(&dmaru->list);
 		kfree(dmaru);
-	else
-		dmar_register_drhd_unit(dmaru);
+	}
 	return ret;
 }
 
+#ifdef CONFIG_DMAR
+LIST_HEAD(dmar_rmrr_units);
+
+static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
+{
+	list_add(&rmrr->list, &dmar_rmrr_units);
+}
+
+
 static int __init
 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
 	struct dmar_rmrr_unit *rmrru;
-	int ret = 0;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
 		return -ENOMEM;
 
+	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	dmar_register_rmrr_unit(rmrru);
+	return 0;
+}
+
+static int __init
+rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
+{
+	struct acpi_dmar_reserved_memory *rmrr;
+	int ret;
+
+	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
 	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-		((void *)rmrr) + header->length,
+		((void *)rmrr) + rmrr->header.length,
 		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
 
-	if (ret || (rmrru->devices_cnt == 0))
+	if (ret || (rmrru->devices_cnt == 0)) {
+		list_del(&rmrru->list);
 		kfree(rmrru);
-	else
-		dmar_register_rmrr_unit(rmrru);
+	}
 	return ret;
 }
+#endif
 
 static void __init
 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
@@ -253,6 +290,25 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 }
 
 /**
+ * dmar_table_detect - checks to see if the platform supports DMAR devices
+ */
+static int __init dmar_table_detect(void)
+{
+	acpi_status status = AE_OK;
+
+	/* if we could find DMAR table, then there are DMAR devices */
+	status = acpi_get_table(ACPI_SIG_DMAR, 0,
+				(struct acpi_table_header **)&dmar_tbl);
+
+	if (ACPI_SUCCESS(status) && !dmar_tbl) {
+		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+		status = AE_NOT_FOUND;
+	}
+
+	return (ACPI_SUCCESS(status) ? 1 : 0);
+}
+
+/**
  * parse_dmar_table - parses the DMA reporting table
  */
 static int __init
@@ -262,6 +318,12 @@ parse_dmar_table(void)
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
 
+	/*
+	 * Do it again, earlier dmar_tbl mapping could be mapped with
+	 * fixed map.
+	 */
+	dmar_table_detect();
+
 	dmar = (struct acpi_table_dmar *)dmar_tbl;
 	if (!dmar)
 		return -ENODEV;
@@ -284,7 +346,9 @@ parse_dmar_table(void)
 			ret = dmar_parse_one_drhd(entry_header);
 			break;
 		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+#ifdef CONFIG_DMAR
 			ret = dmar_parse_one_rmrr(entry_header);
+#endif
 			break;
 		default:
 			printk(KERN_WARNING PREFIX
@@ -300,15 +364,77 @@ parse_dmar_table(void)
 	return ret;
 }
 
+int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+			  struct pci_dev *dev)
+{
+	int index;
+
+	while (dev) {
+		for (index = 0; index < cnt; index++)
+			if (dev == devices[index])
+				return 1;
 
-int __init dmar_table_init(void)
+		/* Check our parent */
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+struct dmar_drhd_unit *
+dmar_find_matched_drhd_unit(struct pci_dev *dev)
+{
+	struct dmar_drhd_unit *drhd = NULL;
+
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
+		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
+						drhd->devices_cnt, dev))
+			return drhd;
+	}
+
+	return NULL;
+}
+
+int __init dmar_dev_scope_init(void)
 {
+	struct dmar_drhd_unit *drhd, *drhd_n;
+	int ret = -ENODEV;
+
+	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+		ret = dmar_parse_dev(drhd);
+		if (ret)
+			return ret;
+	}
+
+#ifdef CONFIG_DMAR
+	{
+		struct dmar_rmrr_unit *rmrr, *rmrr_n;
+		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+			ret = rmrr_parse_dev(rmrr);
+			if (ret)
+				return ret;
+		}
+	}
+#endif
+
+	return ret;
+}
 
+
+int __init dmar_table_init(void)
+{
+	static int dmar_table_initialized;
 	int ret;
 
+	if (dmar_table_initialized)
+		return 0;
+
+	dmar_table_initialized = 1;
+
 	ret = parse_dmar_table();
 	if (ret) {
-		printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+		if (ret != -ENODEV)
+			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
 		return ret;
 	}
 
@@ -317,27 +443,267 @@ int __init dmar_table_init(void)
 		return -ENODEV;
 	}
 
+#ifdef CONFIG_DMAR
 	if (list_empty(&dmar_rmrr_units))
 		printk(KERN_INFO PREFIX "No RMRR found\n");
+#endif
 
+#ifdef CONFIG_INTR_REMAP
+	parse_ioapics_under_ir();
+#endif
 	return 0;
 }
 
-/**
- * early_dmar_detect - checks to see if the platform supports DMAR devices
+void __init detect_intel_iommu(void)
+{
+	int ret;
+
+	ret = dmar_table_detect();
+
+#ifdef CONFIG_DMAR
+	{
+		struct acpi_table_dmar *dmar;
+		/*
+		 * for now we will disable dma-remapping when interrupt
+		 * remapping is enabled.
+		 * When support for queued invalidation for IOTLB invalidation
+		 * is added, we will not need this any more.
+		 */
+		dmar = (struct acpi_table_dmar *) dmar_tbl;
+		if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+			printk(KERN_INFO
+			       "Queued invalidation will be enabled to support "
+			       "x2apic and Intr-remapping.\n");
+			printk(KERN_INFO
+			       "Disabling IOMMU detection, because of missing "
+			       "queued invalidation support for IOTLB "
+			       "invalidation\n");
+			printk(KERN_INFO
+			       "Use \"nox2apic\", if you want to use Intel "
+			       " IOMMU for DMA-remapping and don't care about "
+			       " x2apic support\n");
+
+			dmar_disabled = 1;
+			goto end;
+		}
+
+		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
+		    !dmar_disabled)
+			iommu_detected = 1;
+	}
+end:
+#endif
+	dmar_tbl = NULL;
+}
+
+
+int alloc_iommu(struct dmar_drhd_unit *drhd)
+{
+	struct intel_iommu *iommu;
+	int map_size;
+	u32 ver;
+	static int iommu_allocated = 0;
+
+	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+	if (!iommu)
+		return -ENOMEM;
+
+	iommu->seq_id = iommu_allocated++;
+
+	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+	if (!iommu->reg) {
+		printk(KERN_ERR "IOMMU: can't map the region\n");
+		goto error;
+	}
+	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+	/* the registers might be more than one page */
+	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+		cap_max_fault_reg_offset(iommu->cap));
+	map_size = PAGE_ALIGN_4K(map_size);
+	if (map_size > PAGE_SIZE_4K) {
+		iounmap(iommu->reg);
+		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
+		if (!iommu->reg) {
+			printk(KERN_ERR "IOMMU: can't map the region\n");
+			goto error;
+		}
+	}
+
+	ver = readl(iommu->reg + DMAR_VER_REG);
+	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		iommu->cap, iommu->ecap);
+
+	spin_lock_init(&iommu->register_lock);
+
+	drhd->iommu = iommu;
+	return 0;
+error:
+	kfree(iommu);
+	return -1;
+}
+
+void free_iommu(struct intel_iommu *iommu)
+{
+	if (!iommu)
+		return;
+
+#ifdef CONFIG_DMAR
+	free_dmar_iommu(iommu);
+#endif
+
+	if (iommu->reg)
+		iounmap(iommu->reg);
+	kfree(iommu);
+}
+
+/*
+ * Reclaim all the submitted descriptors which have completed its work.
  */
-int __init early_dmar_detect(void)
+static inline void reclaim_free_desc(struct q_inval *qi)
 {
-	acpi_status status = AE_OK;
+	while (qi->desc_status[qi->free_tail] == QI_DONE) {
+		qi->desc_status[qi->free_tail] = QI_FREE;
+		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
+		qi->free_cnt++;
+	}
+}
 
-	/* if we could find DMAR table, then there are DMAR devices */
-	status = acpi_get_table(ACPI_SIG_DMAR, 0,
-			(struct acpi_table_header **)&dmar_tbl);
+/*
+ * Submit the queued invalidation descriptor to the remapping
+ * hardware unit and wait for its completion.
+ */
+void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+{
+	struct q_inval *qi = iommu->qi;
+	struct qi_desc *hw, wait_desc;
+	int wait_index, index;
+	unsigned long flags;
 
-	if (ACPI_SUCCESS(status) && !dmar_tbl) {
-		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
-		status = AE_NOT_FOUND;
+	if (!qi)
+		return;
+
+	hw = qi->desc;
+
+	spin_lock(&qi->q_lock);
+	while (qi->free_cnt < 3) {
+		spin_unlock(&qi->q_lock);
+		cpu_relax();
+		spin_lock(&qi->q_lock);
 	}
 
-	return (ACPI_SUCCESS(status) ? 1 : 0);
+	index = qi->free_head;
+	wait_index = (index + 1) % QI_LENGTH;
+
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+
+	hw[index] = *desc;
+
+	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
+
+	hw[wait_index] = wait_desc;
+
+	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
+	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
+
+	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
+	qi->free_cnt -= 2;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+	/*
+	 * update the HW tail register indicating the presence of
+	 * new descriptors.
+	 */
+	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+	while (qi->desc_status[wait_index] != QI_DONE) {
+		spin_unlock(&qi->q_lock);
+		cpu_relax();
+		spin_lock(&qi->q_lock);
+	}
+
+	qi->desc_status[index] = QI_DONE;
+
+	reclaim_free_desc(qi);
+	spin_unlock(&qi->q_lock);
+}
+
+/*
+ * Flush the global interrupt entry cache.
+ */
+void qi_global_iec(struct intel_iommu *iommu)
+{
+	struct qi_desc desc;
+
+	desc.low = QI_IEC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+}
+
+/*
+ * Enable Queued Invalidation interface. This is a must to support
+ * interrupt-remapping. Also used by DMA-remapping, which replaces
+ * register based IOTLB invalidation.
+ */
+int dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi;
+
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	/*
+	 * queued invalidation is already setup and enabled.
+	 */
+	if (iommu->qi)
+		return 0;
+
+	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+	if (!iommu->qi)
+		return -ENOMEM;
+
+	qi = iommu->qi;
+
+	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+	if (!qi->desc) {
+		kfree(qi);
+		iommu->qi = 0;
+		return -ENOMEM;
+	}
+
+	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+	if (!qi->desc_status) {
+		free_page((unsigned long) qi->desc);
+		kfree(qi);
+		iommu->qi = 0;
+		return -ENOMEM;
+	}
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_init(&qi->q_lock);
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+	return 0;
 }
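
The queued invalidation (QI) interface added above is synchronous: qi_submit_sync() queues the caller's descriptor plus a wait descriptor whose status word the hardware sets to QI_DONE, then spins until that happens. A minimal caller sketch (hypothetical, not part of this patch; it uses only functions defined in this file):

	/* Hypothetical example: flush the interrupt entry cache once QI is up. */
	static int example_flush_intr_entries(struct intel_iommu *iommu)
	{
		int ret;

		/* Allocates the descriptor page and sets DMA_GCMD_QIE;
		 * returns -ENOENT when ecap_qis() reports no QI support. */
		ret = dmar_enable_qi(iommu);
		if (ret)
			return ret;

		/* Synchronous: returns only after the paired wait
		 * descriptor has been marked QI_DONE by the hardware. */
		qi_global_iec(iommu);
		return 0;
	}
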
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 8467d028732..8cfd1c4926c 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,10 +123,8 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
 static void __init print_bus_info (void)
 {
 	struct bus_info *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &bus_info_head) {
-		ptr = list_entry (ptr1, struct bus_info, bus_info_list);
+	list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
 		debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
 		debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
 		debug ("%s - slot_count = %x\n", __func__, ptr->slot_count);
@@ -146,10 +144,8 @@ static void __init print_bus_info (void)
 static void print_lo_info (void)
 {
 	struct rio_detail *ptr;
-	struct list_head *ptr1;
 	debug ("print_lo_info ----\n");
-	list_for_each (ptr1, &rio_lo_head) {
-		ptr = list_entry (ptr1, struct rio_detail, rio_detail_list);
+	list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
 		debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
 		debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
 		debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
@@ -163,10 +159,8 @@ static void print_lo_info (void)
 static void print_vg_info (void)
 {
 	struct rio_detail *ptr;
-	struct list_head *ptr1;
 	debug ("%s ---\n", __func__);
-	list_for_each (ptr1, &rio_vg_head) {
-		ptr = list_entry (ptr1, struct rio_detail, rio_detail_list);
+	list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) {
 		debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
 		debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
 		debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
@@ -180,10 +174,8 @@ static void print_vg_info (void)
 static void __init print_ebda_pci_rsrc (void)
 {
 	struct ebda_pci_rsrc *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &ibmphp_ebda_pci_rsrc_head) {
-		ptr = list_entry (ptr1, struct ebda_pci_rsrc, ebda_pci_rsrc_list);
+	list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
 		debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
 			__func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
 	}
@@ -192,10 +184,8 @@ static void __init print_ebda_pci_rsrc (void)
 static void __init print_ibm_slot (void)
 {
 	struct slot *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &ibmphp_slot_head) {
-		ptr = list_entry (ptr1, struct slot, ibm_slot_list);
+	list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) {
 		debug ("%s - slot_number: %x\n", __func__, ptr->number);
 	}
 }
@@ -203,10 +193,8 @@ static void __init print_ibm_slot (void)
 static void __init print_opt_vg (void)
 {
 	struct opt_rio *ptr;
-	struct list_head *ptr1;
 	debug ("%s ---\n", __func__);
-	list_for_each (ptr1, &opt_vg_head) {
-		ptr = list_entry (ptr1, struct opt_rio, opt_rio_list);
+	list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
 		debug ("%s - rio_type %x\n", __func__, ptr->rio_type);
 		debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num);
 		debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num);
@@ -217,13 +205,9 @@ static void __init print_opt_vg (void)
 static void __init print_ebda_hpc (void)
 {
 	struct controller *hpc_ptr;
-	struct list_head *ptr1;
 	u16 index;
 
-	list_for_each (ptr1, &ebda_hpc_head) {
-
-		hpc_ptr = list_entry (ptr1, struct controller, ebda_hpc_list);
-
+	list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) {
 		for (index = 0; index < hpc_ptr->slot_count; index++) {
 			debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num);
 			debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num);
@@ -276,7 +260,7 @@ int __init ibmphp_access_ebda (void)
 	iounmap (io_mem);
 	debug ("returned ebda segment: %x\n", ebda_seg);
 
-	io_mem = ioremap (ebda_seg<<4, 65000);
+	io_mem = ioremap(ebda_seg<<4, 1024);
 	if (!io_mem )
 		return -ENOMEM;
 	next_offset = 0x180;
@@ -460,9 +444,7 @@ static int __init ebda_rio_table (void)
 static struct opt_rio *search_opt_vg (u8 chassis_num)
 {
 	struct opt_rio *ptr;
-	struct list_head *ptr1;
-	list_for_each (ptr1, &opt_vg_head) {
-		ptr = list_entry (ptr1, struct opt_rio, opt_rio_list);
+	list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
 		if (ptr->chassis_num == chassis_num)
 			return ptr;
 	}
@@ -473,10 +455,8 @@ static int __init combine_wpg_for_chassis (void)
 {
 	struct opt_rio *opt_rio_ptr = NULL;
 	struct rio_detail *rio_detail_ptr = NULL;
-	struct list_head *list_head_ptr = NULL;
 
-	list_for_each (list_head_ptr, &rio_vg_head) {
-		rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list);
+	list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
 		opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
 		if (!opt_rio_ptr) {
 			opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL);
@@ -497,14 +477,12 @@ static int __init combine_wpg_for_chassis (void)
 }
 
 /*
- * reorgnizing linked list of expansion box
+ * reorganizing linked list of expansion box
 */
 static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
 {
 	struct opt_rio_lo *ptr;
-	struct list_head *ptr1;
-	list_for_each (ptr1, &opt_lo_head) {
-		ptr = list_entry (ptr1, struct opt_rio_lo, opt_rio_lo_list);
+	list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
 		if (ptr->chassis_num == chassis_num)
 			return ptr;
 	}
@@ -515,10 +493,8 @@ static int combine_wpg_for_expansion (void)
 {
 	struct opt_rio_lo *opt_rio_lo_ptr = NULL;
 	struct rio_detail *rio_detail_ptr = NULL;
-	struct list_head *list_head_ptr = NULL;
 
-	list_for_each (list_head_ptr, &rio_lo_head) {
-		rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list);
+	list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
 		opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
 		if (!opt_rio_lo_ptr) {
 			opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL);
@@ -550,20 +526,17 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
 {
 	struct opt_rio *opt_vg_ptr = NULL;
 	struct opt_rio_lo *opt_lo_ptr = NULL;
-	struct list_head *ptr = NULL;
 	int rc = 0;
 
 	if (!var) {
-		list_for_each (ptr, &opt_vg_head) {
-			opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list);
+		list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
 			if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
 				rc = -ENODEV;
 				break;
 			}
 		}
 	} else {
-		list_for_each (ptr, &opt_lo_head) {
-			opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list);
+		list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
 			if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) {
 				rc = -ENODEV;
 				break;
576static struct opt_rio_lo * find_rxe_num (u8 slot_num) 549static struct opt_rio_lo * find_rxe_num (u8 slot_num)
577{ 550{
578 struct opt_rio_lo *opt_lo_ptr; 551 struct opt_rio_lo *opt_lo_ptr;
579 struct list_head *ptr;
580 552
581 list_for_each (ptr, &opt_lo_head) { 553 list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
582 opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list);
583 //check to see if this slot_num belongs to expansion box 554 //check to see if this slot_num belongs to expansion box
584 if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) 555 if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
585 return opt_lo_ptr; 556 return opt_lo_ptr;
@@ -590,10 +561,8 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num)
 static struct opt_rio * find_chassis_num (u8 slot_num)
 {
 	struct opt_rio *opt_vg_ptr;
-	struct list_head *ptr;
 
-	list_for_each (ptr, &opt_vg_head) {
-		opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list);
+	list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
 		//check to see if this slot_num belongs to chassis
 		if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
 			return opt_vg_ptr;
@@ -607,11 +576,9 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
 static u8 calculate_first_slot (u8 slot_num)
 {
 	u8 first_slot = 1;
-	struct list_head * list;
 	struct slot * slot_cur;
 
-	list_for_each (list, &ibmphp_slot_head) {
-		slot_cur = list_entry (list, struct slot, ibm_slot_list);
+	list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
 		if (slot_cur->ctrl) {
 			if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
 				first_slot = slot_cur->ctrl->ending_slot_num;
@@ -767,7 +734,6 @@ static int __init ebda_rsrc_controller (void)
 	struct bus_info *bus_info_ptr1, *bus_info_ptr2;
 	int rc;
 	struct slot *tmp_slot;
-	struct list_head *list;
 
 	addr = hpc_list_ptr->phys_addr;
 	for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
@@ -997,9 +963,7 @@ static int __init ebda_rsrc_controller (void)
 
 	} /* each hpc */
 
-	list_for_each (list, &ibmphp_slot_head) {
-		tmp_slot = list_entry (list, struct slot, ibm_slot_list);
-
+	list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
 		snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
 		pci_hp_register(tmp_slot->hotplug_slot,
 			pci_find_bus(0, tmp_slot->bus), tmp_slot->device);
@@ -1101,10 +1065,8 @@ u16 ibmphp_get_total_controllers (void)
 struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
 {
 	struct slot *slot;
-	struct list_head *list;
 
-	list_for_each (list, &ibmphp_slot_head) {
-		slot = list_entry (list, struct slot, ibm_slot_list);
+	list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) {
 		if (slot->number == physical_num)
 			return slot;
 	}
@@ -1120,10 +1082,8 @@ struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
 struct bus_info *ibmphp_find_same_bus_num (u32 num)
 {
 	struct bus_info *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &bus_info_head) {
-		ptr = list_entry (ptr1, struct bus_info, bus_info_list);
+	list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
 		if (ptr->busno == num)
 			return ptr;
 	}
@@ -1136,10 +1096,8 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num)
 int ibmphp_get_bus_index (u8 num)
 {
 	struct bus_info *ptr;
-	struct list_head *ptr1;
 
-	list_for_each (ptr1, &bus_info_head) {
-		ptr = list_entry (ptr1, struct bus_info, bus_info_list);
+	list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
 		if (ptr->busno == num)
 			return ptr->index;
 	}
@@ -1212,11 +1170,9 @@ static struct pci_driver ibmphp_driver = {
 int ibmphp_register_pci (void)
 {
 	struct controller *ctrl;
-	struct list_head *tmp;
 	int rc = 0;
 
-	list_for_each (tmp, &ebda_hpc_head) {
-		ctrl = list_entry (tmp, struct controller, ebda_hpc_list);
+	list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
 		if (ctrl->ctlr_type == 1) {
 			rc = pci_register_driver(&ibmphp_driver);
 			break;
@@ -1227,12 +1183,10 @@ int ibmphp_register_pci (void)
 static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
 {
 	struct controller *ctrl;
-	struct list_head *tmp;
 
 	debug ("inside ibmphp_probe\n");
 
-	list_for_each (tmp, &ebda_hpc_head) {
-		ctrl = list_entry (tmp, struct controller, ebda_hpc_list);
+	list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
 		if (ctrl->ctlr_type == 1) {
 			if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
 				ctrl->ctrl_dev = dev;
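
Every hunk in this file applies the same mechanical conversion: an open-coded list_for_each() cursor plus a list_entry() call is folded into list_for_each_entry(), which performs the container_of() step itself. An illustrative sketch of the pattern (generic example, not code from this driver):

	struct item {
		int val;
		struct list_head list;
	};

	static void walk(struct list_head *head)
	{
		struct item *ptr;

		/* Before: a separate cursor, with list_entry() on every pass.
		 *
		 *	struct list_head *cur;
		 *	list_for_each (cur, head) {
		 *		ptr = list_entry (cur, struct item, list);
		 *		...
		 *	}
		 */

		/* After: the typed iterator does the same walk directly. */
		list_for_each_entry(ptr, head, list)
			printk(KERN_DEBUG "val = %d\n", ptr->val);
	}
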
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5f85b1b120e..2e6c4474644 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -102,13 +102,13 @@ static int get_##name (struct hotplug_slot *slot, type *value) \
 {									\
 	struct hotplug_slot_ops *ops = slot->ops;			\
 	int retval = 0;							\
-	if (try_module_get(ops->owner)) {				\
-		if (ops->get_##name)					\
-			retval = ops->get_##name(slot, value);		\
-		else							\
-			*value = slot->info->name;			\
-		module_put(ops->owner);					\
-	}								\
+	if (!try_module_get(ops->owner))				\
+		return -ENODEV;						\
+	if (ops->get_##name)						\
+		retval = ops->get_##name(slot, value);			\
+	else								\
+		*value = slot->info->name;				\
+	module_put(ops->owner);						\
 	return retval;							\
 }
 
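
The rewritten macro body turns a silent failure into an explicit error: previously, if try_module_get() failed, the generated function fell through and returned 0 with *value untouched; now it returns -ENODEV. Expanded by hand for one instance, name = power_status with type u8 (illustrative expansion under those assumptions; the real code is generated from the macro above):

	static int get_power_status(struct hotplug_slot *slot, u8 *value)
	{
		struct hotplug_slot_ops *ops = slot->ops;
		int retval = 0;

		if (!try_module_get(ops->owner))
			return -ENODEV;		/* was: silently returned 0 */
		if (ops->get_power_status)
			retval = ops->get_power_status(slot, value);
		else
			*value = slot->info->power_status;
		module_put(ops->owner);
		return retval;
	}
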
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 9e6cec67e1c..c367978bd7f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -57,6 +57,19 @@ extern struct workqueue_struct *pciehp_wq;
 #define warn(format, arg...)						\
 	printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
 
+#define ctrl_dbg(ctrl, format, arg...)					\
+	do {								\
+		if (pciehp_debug)					\
+			dev_printk(, &ctrl->pcie->device,		\
+					format, ## arg);		\
+	} while (0)
+#define ctrl_err(ctrl, format, arg...)					\
+	dev_err(&ctrl->pcie->device, format, ## arg)
+#define ctrl_info(ctrl, format, arg...)					\
+	dev_info(&ctrl->pcie->device, format, ## arg)
+#define ctrl_warn(ctrl, format, arg...)					\
+	dev_warn(&ctrl->pcie->device, format, ## arg)
+
 #define SLOT_NAME_SIZE 10
 struct slot {
 	u8 bus;
@@ -87,6 +100,7 @@ struct controller {
 	int num_slots;			/* Number of slots on ctlr */
 	int slot_num_inc;		/* 1 or -1 */
 	struct pci_dev *pci_dev;
+	struct pcie_device *pcie;	/* PCI Express port service */
 	struct list_head slot_list;
 	struct hpc_ops *hpc_ops;
 	wait_queue_head_t queue;	/* sleep & wake process */
@@ -170,7 +184,7 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
 		return slot;
 	}
 
-	err("%s: slot (device=0x%x) not found\n", __func__, device);
+	ctrl_err(ctrl, "%s: slot (device=0x%x) not found\n", __func__, device);
 	return NULL;
 }
 
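
With the new struct pcie_device back-pointer in struct controller, the ctrl_* wrappers above prefix every message with the PCIe port service device's name instead of the bare "pciehp:" module tag. The usage shape, as a hypothetical caller (assuming a struct controller *ctrl in scope, as in the hunks that follow):

	static void example_report(struct controller *ctrl, u8 device)
	{
		/* Skipped at runtime unless pciehp_debug is set. */
		ctrl_dbg(ctrl, "probing device 0x%x\n", device);
		/* Routed through dev_err() on &ctrl->pcie->device. */
		ctrl_err(ctrl, "device 0x%x not found\n", device);
	}
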
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 4fd5355bc3b..c748a19db89 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -144,9 +144,10 @@ set_lock_exit:
  * sysfs interface which allows the user to toggle the Electro Mechanical
  * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
  */
-static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf,
-		size_t count)
+static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
+		const char *buf, size_t count)
 {
+	struct slot *slot = hotplug_slot->private;
 	unsigned long llock;
 	u8 lock;
 	int retval = 0;
@@ -157,10 +158,11 @@ static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf,
 	switch (lock) {
 	case 0:
 	case 1:
-		retval = set_lock_status(slot, lock);
+		retval = set_lock_status(hotplug_slot, lock);
 		break;
 	default:
-		err ("%d is an invalid lock value\n", lock);
+		ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
+			 lock);
 		retval = -EINVAL;
 	}
 	if (retval)
@@ -180,7 +182,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
  */
 static void release_slot(struct hotplug_slot *hotplug_slot)
 {
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	struct slot *slot = hotplug_slot->private;
+
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	kfree(hotplug_slot->info);
 	kfree(hotplug_slot);
@@ -215,9 +220,9 @@ static int init_slots(struct controller *ctrl)
 		get_adapter_status(hotplug_slot, &info->adapter_status);
 		slot->hotplug_slot = hotplug_slot;
 
-		dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
-		    "slot_device_offset=%x\n", slot->bus, slot->device,
-		    slot->hp_slot, slot->number, ctrl->slot_device_offset);
+		ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x "
+			 "slot_device_offset=%x\n", slot->bus, slot->device,
+			 slot->hp_slot, slot->number, ctrl->slot_device_offset);
 duplicate_name:
 		retval = pci_hp_register(hotplug_slot,
 					 ctrl->pci_dev->subordinate,
@@ -233,9 +238,11 @@ duplicate_name:
 				if (len < SLOT_NAME_SIZE)
 					goto duplicate_name;
 				else
-					err("duplicate slot name overflow\n");
+					ctrl_err(ctrl, "duplicate slot name "
+						 "overflow\n");
 			}
-			err("pci_hp_register failed with error %d\n", retval);
+			ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
+				 retval);
 			goto error_info;
 		}
 		/* create additional sysfs entries */
@@ -244,7 +251,8 @@ duplicate_name:
 					  &hotplug_slot_attr_lock.attr);
 		if (retval) {
 			pci_hp_deregister(hotplug_slot);
-			err("cannot create additional sysfs entries\n");
+			ctrl_err(ctrl, "cannot create additional sysfs "
+				 "entries\n");
 			goto error_info;
 		}
 	}
@@ -278,7 +286,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	hotplug_slot->info->attention_status = status;
 
@@ -293,7 +302,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	return pciehp_sysfs_enable_slot(slot);
 }
@@ -303,7 +313,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
 {
 	struct slot *slot = hotplug_slot->private;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	return pciehp_sysfs_disable_slot(slot);
 }
@@ -313,7 +324,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	retval = slot->hpc_ops->get_power_status(slot, value);
 	if (retval < 0)
@@ -327,7 +339,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	retval = slot->hpc_ops->get_attention_status(slot, value);
 	if (retval < 0)
@@ -341,7 +354,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	retval = slot->hpc_ops->get_latch_status(slot, value);
 	if (retval < 0)
@@ -355,7 +369,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	retval = slot->hpc_ops->get_adapter_status(slot, value);
 	if (retval < 0)
@@ -370,7 +385,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	retval = slot->hpc_ops->get_max_bus_speed(slot, value);
 	if (retval < 0)
@@ -384,7 +400,8 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
 	struct slot *slot = hotplug_slot->private;
 	int retval;
 
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+	ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
+		 __func__, hotplug_slot->name);
 
 	retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
 	if (retval < 0)
@@ -402,14 +419,15 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
 	struct pci_dev *pdev = dev->port;
 
 	if (pciehp_force)
-		dbg("Bypassing BIOS check for pciehp use on %s\n",
-		    pci_name(pdev));
+		dev_info(&dev->device,
+			 "Bypassing BIOS check for pciehp use on %s\n",
+			 pci_name(pdev));
 	else if (pciehp_get_hp_hw_control_from_firmware(pdev))
 		goto err_out_none;
 
 	ctrl = pcie_init(dev);
 	if (!ctrl) {
-		dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
+		dev_err(&dev->device, "controller initialization failed\n");
 		goto err_out_none;
 	}
 	set_service_data(dev, ctrl);
@@ -418,11 +436,10 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
 	rc = init_slots(ctrl);
 	if (rc) {
 		if (rc == -EBUSY)
-			warn("%s: slot already registered by another "
-				"hotplug driver\n", PCIE_MODULE_NAME);
+			ctrl_warn(ctrl, "slot already registered by another "
+				  "hotplug driver\n");
 		else
-			err("%s: slot initialization failed\n",
-				PCIE_MODULE_NAME);
+			ctrl_err(ctrl, "slot initialization failed\n");
 		goto err_out_release_ctlr;
 	}
 
@@ -461,13 +478,13 @@ static void pciehp_remove (struct pcie_device *dev)
 #ifdef CONFIG_PM
 static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
 {
-	printk("%s ENTRY\n", __func__);
+	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	return 0;
 }
 
 static int pciehp_resume (struct pcie_device *dev)
 {
-	printk("%s ENTRY\n", __func__);
+	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	if (pciehp_force) {
 		struct controller *ctrl = get_service_data(dev);
 		struct slot *t_slot;
@@ -497,10 +514,9 @@ static struct pcie_port_service_id port_pci_ids[] = { {
 	.driver_data	= 0,
 	}, { /* end: all zeroes */ }
 };
-static const char device_name[] = "hpdriver";
 
 static struct pcie_port_service_driver hpdriver_portdrv = {
-	.name		= (char *)device_name,
+	.name		= PCIE_MODULE_NAME,
 	.id_table	= &port_pci_ids[0],
 
 	.probe		= pciehp_probe,
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 96a5d55a498..acb7f9efd18 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -58,14 +58,15 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
 u8 pciehp_handle_attention_button(struct slot *p_slot)
 {
 	u32 event_type;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Attention Button Change */
-	dbg("pciehp: Attention button interrupt received.\n");
+	ctrl_dbg(ctrl, "Attention button interrupt received.\n");
 
 	/*
 	 * Button pressed - See if need to TAKE ACTION!!!
 	 */
-	info("Button pressed on Slot(%s)\n", p_slot->name);
+	ctrl_info(ctrl, "Button pressed on Slot(%s)\n", p_slot->name);
 	event_type = INT_BUTTON_PRESS;
 
 	queue_interrupt_event(p_slot, event_type);
@@ -77,22 +78,23 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
 {
 	u8 getstatus;
 	u32 event_type;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Switch Change */
-	dbg("pciehp: Switch interrupt received.\n");
+	ctrl_dbg(ctrl, "Switch interrupt received.\n");
 
 	p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 	if (getstatus) {
 		/*
 		 * Switch opened
 		 */
-		info("Latch open on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Latch open on Slot(%s)\n", p_slot->name);
 		event_type = INT_SWITCH_OPEN;
 	} else {
 		/*
 		 * Switch closed
 		 */
-		info("Latch close on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Latch close on Slot(%s)\n", p_slot->name);
 		event_type = INT_SWITCH_CLOSE;
 	}
 
@@ -105,9 +107,10 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
 {
 	u32 event_type;
 	u8 presence_save;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* Presence Change */
-	dbg("pciehp: Presence/Notify input change.\n");
+	ctrl_dbg(ctrl, "Presence/Notify input change.\n");
 
 	/* Switch is open, assume a presence change
 	 * Save the presence state
@@ -117,13 +120,13 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
 		/*
 		 * Card Present
 		 */
-		info("Card present on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Card present on Slot(%s)\n", p_slot->name);
 		event_type = INT_PRESENCE_ON;
 	} else {
 		/*
 		 * Not Present
 		 */
-		info("Card not present on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Card not present on Slot(%s)\n", p_slot->name);
 		event_type = INT_PRESENCE_OFF;
 	}
 
@@ -135,23 +138,25 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
 u8 pciehp_handle_power_fault(struct slot *p_slot)
 {
 	u32 event_type;
+	struct controller *ctrl = p_slot->ctrl;
 
 	/* power fault */
-	dbg("pciehp: Power fault interrupt received.\n");
+	ctrl_dbg(ctrl, "Power fault interrupt received.\n");
 
 	if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
 		/*
 		 * power fault Cleared
 		 */
-		info("Power fault cleared on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
+			  p_slot->name);
 		event_type = INT_POWER_FAULT_CLEAR;
 	} else {
 		/*
 		 * power fault
 		 */
-		info("Power fault on Slot(%s)\n", p_slot->name);
+		ctrl_info(ctrl, "Power fault on Slot(%s)\n", p_slot->name);
 		event_type = INT_POWER_FAULT;
-		info("power fault bit %x set\n", 0);
+		ctrl_info(ctrl, "power fault bit %x set\n", 0);
 	}
 
 	queue_interrupt_event(p_slot, event_type);
@@ -168,8 +173,9 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
168 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 173 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
169 if (POWER_CTRL(ctrl)) { 174 if (POWER_CTRL(ctrl)) {
170 if (pslot->hpc_ops->power_off_slot(pslot)) { 175 if (pslot->hpc_ops->power_off_slot(pslot)) {
171 err("%s: Issue of Slot Power Off command failed\n", 176 ctrl_err(ctrl,
172 __func__); 177 "%s: Issue of Slot Power Off command failed\n",
178 __func__);
173 return; 179 return;
174 } 180 }
175 } 181 }
@@ -186,8 +192,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
186 192
187 if (ATTN_LED(ctrl)) { 193 if (ATTN_LED(ctrl)) {
188 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 194 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
189 err("%s: Issue of Set Attention Led command failed\n", 195 ctrl_err(ctrl, "%s: Issue of Set Attention "
190 __func__); 196 "Led command failed\n", __func__);
191 return; 197 return;
192 } 198 }
193 } 199 }
@@ -205,9 +211,9 @@ static int board_added(struct slot *p_slot)
205 int retval = 0; 211 int retval = 0;
206 struct controller *ctrl = p_slot->ctrl; 212 struct controller *ctrl = p_slot->ctrl;
207 213
208 dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n", 214 ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d ,%d\n",
209 __func__, p_slot->device, 215 __func__, p_slot->device, ctrl->slot_device_offset,
210 ctrl->slot_device_offset, p_slot->hp_slot); 216 p_slot->hp_slot);
211 217
212 if (POWER_CTRL(ctrl)) { 218 if (POWER_CTRL(ctrl)) {
213 /* Power on slot */ 219 /* Power on slot */
@@ -225,22 +231,22 @@ static int board_added(struct slot *p_slot)
225 /* Check link training status */ 231 /* Check link training status */
226 retval = p_slot->hpc_ops->check_lnk_status(ctrl); 232 retval = p_slot->hpc_ops->check_lnk_status(ctrl);
227 if (retval) { 233 if (retval) {
228 err("%s: Failed to check link status\n", __func__); 234 ctrl_err(ctrl, "%s: Failed to check link status\n", __func__);
229 set_slot_off(ctrl, p_slot); 235 set_slot_off(ctrl, p_slot);
230 return retval; 236 return retval;
231 } 237 }
232 238
233 /* Check for a power fault */ 239 /* Check for a power fault */
234 if (p_slot->hpc_ops->query_power_fault(p_slot)) { 240 if (p_slot->hpc_ops->query_power_fault(p_slot)) {
235 dbg("%s: power fault detected\n", __func__); 241 ctrl_dbg(ctrl, "%s: power fault detected\n", __func__);
236 retval = POWER_FAILURE; 242 retval = POWER_FAILURE;
237 goto err_exit; 243 goto err_exit;
238 } 244 }
239 245
240 retval = pciehp_configure_device(p_slot); 246 retval = pciehp_configure_device(p_slot);
241 if (retval) { 247 if (retval) {
242 err("Cannot add device 0x%x:%x\n", p_slot->bus, 248 ctrl_err(ctrl, "Cannot add device 0x%x:%x\n",
243 p_slot->device); 249 p_slot->bus, p_slot->device);
244 goto err_exit; 250 goto err_exit;
245 } 251 }
246 252
@@ -272,14 +278,14 @@ static int remove_board(struct slot *p_slot)
272 if (retval) 278 if (retval)
273 return retval; 279 return retval;
274 280
275 dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); 281 ctrl_dbg(ctrl, "In %s, hp_slot = %d\n", __func__, p_slot->hp_slot);
276 282
277 if (POWER_CTRL(ctrl)) { 283 if (POWER_CTRL(ctrl)) {
278 /* power off slot */ 284 /* power off slot */
279 retval = p_slot->hpc_ops->power_off_slot(p_slot); 285 retval = p_slot->hpc_ops->power_off_slot(p_slot);
280 if (retval) { 286 if (retval) {
281 err("%s: Issue of Slot Disable command failed\n", 287 ctrl_err(ctrl, "%s: Issue of Slot Disable command "
282 __func__); 288 "failed\n", __func__);
283 return retval; 289 return retval;
284 } 290 }
285 } 291 }
@@ -320,8 +326,8 @@ static void pciehp_power_thread(struct work_struct *work)
320 switch (p_slot->state) { 326 switch (p_slot->state) {
321 case POWEROFF_STATE: 327 case POWEROFF_STATE:
322 mutex_unlock(&p_slot->lock); 328 mutex_unlock(&p_slot->lock);
323 dbg("%s: disabling bus:device(%x:%x)\n", 329 ctrl_dbg(p_slot->ctrl, "%s: disabling bus:device(%x:%x)\n",
324 __func__, p_slot->bus, p_slot->device); 330 __func__, p_slot->bus, p_slot->device);
325 pciehp_disable_slot(p_slot); 331 pciehp_disable_slot(p_slot);
326 mutex_lock(&p_slot->lock); 332 mutex_lock(&p_slot->lock);
327 p_slot->state = STATIC_STATE; 333 p_slot->state = STATIC_STATE;
@@ -349,7 +355,8 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
349 355
350 info = kmalloc(sizeof(*info), GFP_KERNEL); 356 info = kmalloc(sizeof(*info), GFP_KERNEL);
351 if (!info) { 357 if (!info) {
352 err("%s: Cannot allocate memory\n", __func__); 358 ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
359 __func__);
353 return; 360 return;
354 } 361 }
355 info->p_slot = p_slot; 362 info->p_slot = p_slot;
@@ -403,12 +410,14 @@ static void handle_button_press_event(struct slot *p_slot)
403 p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 410 p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
404 if (getstatus) { 411 if (getstatus) {
405 p_slot->state = BLINKINGOFF_STATE; 412 p_slot->state = BLINKINGOFF_STATE;
406 info("PCI slot #%s - powering off due to button " 413 ctrl_info(ctrl,
407 "press.\n", p_slot->name); 414 "PCI slot #%s - powering off due to button "
415 "press.\n", p_slot->name);
408 } else { 416 } else {
409 p_slot->state = BLINKINGON_STATE; 417 p_slot->state = BLINKINGON_STATE;
410 info("PCI slot #%s - powering on due to button " 418 ctrl_info(ctrl,
411 "press.\n", p_slot->name); 419 "PCI slot #%s - powering on due to button "
420 "press.\n", p_slot->name);
412 } 421 }
413 /* blink green LED and turn off amber */ 422 /* blink green LED and turn off amber */
414 if (PWR_LED(ctrl)) 423 if (PWR_LED(ctrl))
@@ -425,8 +434,8 @@ static void handle_button_press_event(struct slot *p_slot)
425 * press the attention again before the 5 sec. limit 434 * press the attention again before the 5 sec. limit
426 * expires to cancel hot-add or hot-remove 435 * expires to cancel hot-add or hot-remove
427 */ 436 */
428 info("Button cancel on Slot(%s)\n", p_slot->name); 437 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", p_slot->name);
429 dbg("%s: button cancel\n", __func__); 438 ctrl_dbg(ctrl, "%s: button cancel\n", __func__);
430 cancel_delayed_work(&p_slot->work); 439 cancel_delayed_work(&p_slot->work);
431 if (p_slot->state == BLINKINGOFF_STATE) { 440 if (p_slot->state == BLINKINGOFF_STATE) {
432 if (PWR_LED(ctrl)) 441 if (PWR_LED(ctrl))
@@ -437,8 +446,8 @@ static void handle_button_press_event(struct slot *p_slot)
437 } 446 }
438 if (ATTN_LED(ctrl)) 447 if (ATTN_LED(ctrl))
439 p_slot->hpc_ops->set_attention_status(p_slot, 0); 448 p_slot->hpc_ops->set_attention_status(p_slot, 0);
440 info("PCI slot #%s - action canceled due to button press\n", 449 ctrl_info(ctrl, "PCI slot #%s - action canceled "
441 p_slot->name); 450 "due to button press\n", p_slot->name);
442 p_slot->state = STATIC_STATE; 451 p_slot->state = STATIC_STATE;
443 break; 452 break;
444 case POWEROFF_STATE: 453 case POWEROFF_STATE:
@@ -448,11 +457,11 @@ static void handle_button_press_event(struct slot *p_slot)
448 * this means that the previous attention button action 457 * this means that the previous attention button action
449 * to hot-add or hot-remove is undergoing 458 * to hot-add or hot-remove is undergoing
450 */ 459 */
451 info("Button ignore on Slot(%s)\n", p_slot->name); 460 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", p_slot->name);
452 update_slot_info(p_slot); 461 update_slot_info(p_slot);
453 break; 462 break;
454 default: 463 default:
455 warn("Not a valid state\n"); 464 ctrl_warn(ctrl, "Not a valid state\n");
456 break; 465 break;
457 } 466 }
458} 467}
@@ -467,7 +476,8 @@ static void handle_surprise_event(struct slot *p_slot)
467 476
468 info = kmalloc(sizeof(*info), GFP_KERNEL); 477 info = kmalloc(sizeof(*info), GFP_KERNEL);
469 if (!info) { 478 if (!info) {
470 err("%s: Cannot allocate memory\n", __func__); 479 ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
480 __func__);
471 return; 481 return;
472 } 482 }
473 info->p_slot = p_slot; 483 info->p_slot = p_slot;
@@ -505,7 +515,7 @@ static void interrupt_event_handler(struct work_struct *work)
505 case INT_PRESENCE_OFF: 515 case INT_PRESENCE_OFF:
506 if (!HP_SUPR_RM(ctrl)) 516 if (!HP_SUPR_RM(ctrl))
507 break; 517 break;
508 dbg("Surprise Removal\n"); 518 ctrl_dbg(ctrl, "Surprise Removal\n");
509 update_slot_info(p_slot); 519 update_slot_info(p_slot);
510 handle_surprise_event(p_slot); 520 handle_surprise_event(p_slot);
511 break; 521 break;
@@ -522,22 +532,23 @@ int pciehp_enable_slot(struct slot *p_slot)
522{ 532{
523 u8 getstatus = 0; 533 u8 getstatus = 0;
524 int rc; 534 int rc;
535 struct controller *ctrl = p_slot->ctrl;
525 536
526 /* Check to see if (latch closed, card present, power off) */ 537 /* Check to see if (latch closed, card present, power off) */
527 mutex_lock(&p_slot->ctrl->crit_sect); 538 mutex_lock(&p_slot->ctrl->crit_sect);
528 539
529 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 540 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
530 if (rc || !getstatus) { 541 if (rc || !getstatus) {
531 info("%s: no adapter on slot(%s)\n", __func__, 542 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
532 p_slot->name); 543 __func__, p_slot->name);
533 mutex_unlock(&p_slot->ctrl->crit_sect); 544 mutex_unlock(&p_slot->ctrl->crit_sect);
534 return -ENODEV; 545 return -ENODEV;
535 } 546 }
536 if (MRL_SENS(p_slot->ctrl)) { 547 if (MRL_SENS(p_slot->ctrl)) {
537 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 548 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
538 if (rc || getstatus) { 549 if (rc || getstatus) {
539 info("%s: latch open on slot(%s)\n", __func__, 550 ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
540 p_slot->name); 551 __func__, p_slot->name);
541 mutex_unlock(&p_slot->ctrl->crit_sect); 552 mutex_unlock(&p_slot->ctrl->crit_sect);
542 return -ENODEV; 553 return -ENODEV;
543 } 554 }
@@ -546,8 +557,8 @@ int pciehp_enable_slot(struct slot *p_slot)
546 if (POWER_CTRL(p_slot->ctrl)) { 557 if (POWER_CTRL(p_slot->ctrl)) {
547 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 558 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
548 if (rc || getstatus) { 559 if (rc || getstatus) {
549 info("%s: already enabled on slot(%s)\n", __func__, 560 ctrl_info(ctrl, "%s: already enabled on slot(%s)\n",
550 p_slot->name); 561 __func__, p_slot->name);
551 mutex_unlock(&p_slot->ctrl->crit_sect); 562 mutex_unlock(&p_slot->ctrl->crit_sect);
552 return -EINVAL; 563 return -EINVAL;
553 } 564 }
@@ -571,6 +582,7 @@ int pciehp_disable_slot(struct slot *p_slot)
571{ 582{
572 u8 getstatus = 0; 583 u8 getstatus = 0;
573 int ret = 0; 584 int ret = 0;
585 struct controller *ctrl = p_slot->ctrl;
574 586
575 if (!p_slot->ctrl) 587 if (!p_slot->ctrl)
576 return 1; 588 return 1;
@@ -581,8 +593,8 @@ int pciehp_disable_slot(struct slot *p_slot)
581 if (!HP_SUPR_RM(p_slot->ctrl)) { 593 if (!HP_SUPR_RM(p_slot->ctrl)) {
582 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 594 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
583 if (ret || !getstatus) { 595 if (ret || !getstatus) {
584 info("%s: no adapter on slot(%s)\n", __func__, 596 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
585 p_slot->name); 597 __func__, p_slot->name);
586 mutex_unlock(&p_slot->ctrl->crit_sect); 598 mutex_unlock(&p_slot->ctrl->crit_sect);
587 return -ENODEV; 599 return -ENODEV;
588 } 600 }
@@ -591,8 +603,8 @@ int pciehp_disable_slot(struct slot *p_slot)
591 if (MRL_SENS(p_slot->ctrl)) { 603 if (MRL_SENS(p_slot->ctrl)) {
592 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 604 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
593 if (ret || getstatus) { 605 if (ret || getstatus) {
594 info("%s: latch open on slot(%s)\n", __func__, 606 ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
595 p_slot->name); 607 __func__, p_slot->name);
596 mutex_unlock(&p_slot->ctrl->crit_sect); 608 mutex_unlock(&p_slot->ctrl->crit_sect);
597 return -ENODEV; 609 return -ENODEV;
598 } 610 }
@@ -601,8 +613,8 @@ int pciehp_disable_slot(struct slot *p_slot)
601 if (POWER_CTRL(p_slot->ctrl)) { 613 if (POWER_CTRL(p_slot->ctrl)) {
602 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 614 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
603 if (ret || !getstatus) { 615 if (ret || !getstatus) {
604 info("%s: already disabled slot(%s)\n", __func__, 616 ctrl_info(ctrl, "%s: already disabled slot(%s)\n",
605 p_slot->name); 617 __func__, p_slot->name);
606 mutex_unlock(&p_slot->ctrl->crit_sect); 618 mutex_unlock(&p_slot->ctrl->crit_sect);
607 return -EINVAL; 619 return -EINVAL;
608 } 620 }
@@ -618,6 +630,7 @@ int pciehp_disable_slot(struct slot *p_slot)
618int pciehp_sysfs_enable_slot(struct slot *p_slot) 630int pciehp_sysfs_enable_slot(struct slot *p_slot)
619{ 631{
620 int retval = -ENODEV; 632 int retval = -ENODEV;
633 struct controller *ctrl = p_slot->ctrl;
621 634
622 mutex_lock(&p_slot->lock); 635 mutex_lock(&p_slot->lock);
623 switch (p_slot->state) { 636 switch (p_slot->state) {
@@ -631,15 +644,15 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
631 p_slot->state = STATIC_STATE; 644 p_slot->state = STATIC_STATE;
632 break; 645 break;
633 case POWERON_STATE: 646 case POWERON_STATE:
634 info("Slot %s is already in powering on state\n", 647 ctrl_info(ctrl, "Slot %s is already in powering on state\n",
635 p_slot->name); 648 p_slot->name);
636 break; 649 break;
637 case BLINKINGOFF_STATE: 650 case BLINKINGOFF_STATE:
638 case POWEROFF_STATE: 651 case POWEROFF_STATE:
639 info("Already enabled on slot %s\n", p_slot->name); 652 ctrl_info(ctrl, "Already enabled on slot %s\n", p_slot->name);
640 break; 653 break;
641 default: 654 default:
642 err("Not a valid state on slot %s\n", p_slot->name); 655 ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name);
643 break; 656 break;
644 } 657 }
645 mutex_unlock(&p_slot->lock); 658 mutex_unlock(&p_slot->lock);
@@ -650,6 +663,7 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
650int pciehp_sysfs_disable_slot(struct slot *p_slot) 663int pciehp_sysfs_disable_slot(struct slot *p_slot)
651{ 664{
652 int retval = -ENODEV; 665 int retval = -ENODEV;
666 struct controller *ctrl = p_slot->ctrl;
653 667
654 mutex_lock(&p_slot->lock); 668 mutex_lock(&p_slot->lock);
655 switch (p_slot->state) { 669 switch (p_slot->state) {
@@ -663,15 +677,15 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
663 p_slot->state = STATIC_STATE; 677 p_slot->state = STATIC_STATE;
664 break; 678 break;
665 case POWEROFF_STATE: 679 case POWEROFF_STATE:
666 info("Slot %s is already in powering off state\n", 680 ctrl_info(ctrl, "Slot %s is already in powering off state\n",
667 p_slot->name); 681 p_slot->name);
668 break; 682 break;
669 case BLINKINGON_STATE: 683 case BLINKINGON_STATE:
670 case POWERON_STATE: 684 case POWERON_STATE:
671 info("Already disabled on slot %s\n", p_slot->name); 685 ctrl_info(ctrl, "Already disabled on slot %s\n", p_slot->name);
672 break; 686 break;
673 default: 687 default:
674 err("Not a valid state on slot %s\n", p_slot->name); 688 ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name);
675 break; 689 break;
676 } 690 }
677 mutex_unlock(&p_slot->lock); 691 mutex_unlock(&p_slot->lock);
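The change running through pciehp_ctrl.c is mechanical: the driver-wide dbg()/err()/info()/warn() macros become ctrl_dbg()/ctrl_err()/ctrl_info()/ctrl_warn(), which take the controller so each message is prefixed with the device it concerns. A rough sketch of how such wrappers can be layered on the generic dev_* helpers; the real definitions live in pciehp.h and may differ in detail, and the ctrl->pcie->device field is assumed from the pcie_init() hunk below:

/* Illustrative sketch, not the verbatim pciehp.h macros. */
#define ctrl_dbg(ctrl, format, arg...)				\
	do {							\
		if (pciehp_debug)				\
			dev_printk(KERN_DEBUG,			\
				   &(ctrl)->pcie->device,	\
				   format, ## arg);		\
	} while (0)
#define ctrl_err(ctrl, format, arg...)	\
	dev_err(&(ctrl)->pcie->device, format, ## arg)
#define ctrl_info(ctrl, format, arg...)	\
	dev_info(&(ctrl)->pcie->device, format, ## arg)
#define ctrl_warn(ctrl, format, arg...)	\
	dev_warn(&(ctrl)->pcie->device, format, ## arg)

With device-tagged output, a message such as "Latch open on Slot(2)" becomes attributable to a specific port on systems with several hotplug controllers.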
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9d934ddee95..8e9530c4c36 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -223,7 +223,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
223 223
224static inline int pciehp_request_irq(struct controller *ctrl) 224static inline int pciehp_request_irq(struct controller *ctrl)
225{ 225{
226 int retval, irq = ctrl->pci_dev->irq; 226 int retval, irq = ctrl->pcie->irq;
227 227
228 /* Install interrupt polling timer. Start with 10 sec delay */ 228 /* Install interrupt polling timer. Start with 10 sec delay */
229 if (pciehp_poll_mode) { 229 if (pciehp_poll_mode) {
@@ -235,7 +235,8 @@ static inline int pciehp_request_irq(struct controller *ctrl)
235 /* Installs the interrupt handler */ 235 /* Installs the interrupt handler */
236 retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); 236 retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
237 if (retval) 237 if (retval)
238 err("Cannot get irq %d for the hotplug controller\n", irq); 238 ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
239 irq);
239 return retval; 240 return retval;
240} 241}
241 242
@@ -244,7 +245,7 @@ static inline void pciehp_free_irq(struct controller *ctrl)
244 if (pciehp_poll_mode) 245 if (pciehp_poll_mode)
245 del_timer_sync(&ctrl->poll_timer); 246 del_timer_sync(&ctrl->poll_timer);
246 else 247 else
247 free_irq(ctrl->pci_dev->irq, ctrl); 248 free_irq(ctrl->pcie->irq, ctrl);
248} 249}
249 250
250static int pcie_poll_cmd(struct controller *ctrl) 251static int pcie_poll_cmd(struct controller *ctrl)
@@ -282,7 +283,7 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
282 else 283 else
283 rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); 284 rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
284 if (!rc) 285 if (!rc)
285 dbg("Command not completed in 1000 msec\n"); 286 ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
286} 287}
287 288
288/** 289/**
@@ -301,7 +302,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
301 302
302 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 303 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
303 if (retval) { 304 if (retval) {
304 err("%s: Cannot read SLOTSTATUS register\n", __func__); 305 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
306 __func__);
305 goto out; 307 goto out;
306 } 308 }
307 309
@@ -312,26 +314,28 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
312 * proceed forward to issue the next command according 314 * proceed forward to issue the next command according
313 * to spec. Just print out the error message. 315 * to spec. Just print out the error message.
314 */ 316 */
315 dbg("%s: CMD_COMPLETED not clear after 1 sec.\n", 317 ctrl_dbg(ctrl,
316 __func__); 318 "%s: CMD_COMPLETED not clear after 1 sec.\n",
319 __func__);
317 } else if (!NO_CMD_CMPL(ctrl)) { 320 } else if (!NO_CMD_CMPL(ctrl)) {
318 /* 321 /*
319 * This controller semms to notify of command completed 322 * This controller semms to notify of command completed
320 * event even though it supports none of power 323 * event even though it supports none of power
321 * controller, attention led, power led and EMI. 324 * controller, attention led, power led and EMI.
322 */ 325 */
323 dbg("%s: Unexpected CMD_COMPLETED. Need to wait for " 326 ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Need to "
324 "command completed event.\n", __func__); 327 "wait for command completed event.\n",
328 __func__);
325 ctrl->no_cmd_complete = 0; 329 ctrl->no_cmd_complete = 0;
326 } else { 330 } else {
327 dbg("%s: Unexpected CMD_COMPLETED. Maybe the " 331 ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Maybe "
328 "controller is broken.\n", __func__); 332 "the controller is broken.\n", __func__);
329 } 333 }
330 } 334 }
331 335
332 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 336 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
333 if (retval) { 337 if (retval) {
334 err("%s: Cannot read SLOTCTRL register\n", __func__); 338 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
335 goto out; 339 goto out;
336 } 340 }
337 341
@@ -341,7 +345,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
341 smp_mb(); 345 smp_mb();
342 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 346 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
343 if (retval) 347 if (retval)
344 err("%s: Cannot write to SLOTCTRL register\n", __func__); 348 ctrl_err(ctrl, "%s: Cannot write to SLOTCTRL register\n",
349 __func__);
345 350
346 /* 351 /*
347 * Wait for command completion. 352 * Wait for command completion.
@@ -370,14 +375,15 @@ static int hpc_check_lnk_status(struct controller *ctrl)
370 375
371 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 376 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
372 if (retval) { 377 if (retval) {
373 err("%s: Cannot read LNKSTATUS register\n", __func__); 378 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
379 __func__);
374 return retval; 380 return retval;
375 } 381 }
376 382
377 dbg("%s: lnk_status = %x\n", __func__, lnk_status); 383 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
378 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || 384 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
379 !(lnk_status & NEG_LINK_WD)) { 385 !(lnk_status & NEG_LINK_WD)) {
380 err("%s : Link Training Error occurs \n", __func__); 386 ctrl_err(ctrl, "%s : Link Training Error occurs \n", __func__);
381 retval = -1; 387 retval = -1;
382 return retval; 388 return retval;
383 } 389 }
@@ -394,12 +400,12 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
394 400
395 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 401 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
396 if (retval) { 402 if (retval) {
397 err("%s: Cannot read SLOTCTRL register\n", __func__); 403 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
398 return retval; 404 return retval;
399 } 405 }
400 406
401 dbg("%s: SLOTCTRL %x, value read %x\n", 407 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
402 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 408 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
403 409
404 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; 410 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
405 411
@@ -433,11 +439,11 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
433 439
434 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 440 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
435 if (retval) { 441 if (retval) {
436 err("%s: Cannot read SLOTCTRL register\n", __func__); 442 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
437 return retval; 443 return retval;
438 } 444 }
439 dbg("%s: SLOTCTRL %x value read %x\n", 445 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
440 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 446 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
441 447
442 pwr_state = (slot_ctrl & PWR_CTRL) >> 10; 448 pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
443 449
@@ -464,7 +470,8 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
464 470
465 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 471 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
466 if (retval) { 472 if (retval) {
467 err("%s: Cannot read SLOTSTATUS register\n", __func__); 473 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
474 __func__);
468 return retval; 475 return retval;
469 } 476 }
470 477
@@ -482,7 +489,8 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
482 489
483 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 490 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
484 if (retval) { 491 if (retval) {
485 err("%s: Cannot read SLOTSTATUS register\n", __func__); 492 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
493 __func__);
486 return retval; 494 return retval;
487 } 495 }
488 card_state = (u8)((slot_status & PRSN_STATE) >> 6); 496 card_state = (u8)((slot_status & PRSN_STATE) >> 6);
@@ -500,7 +508,7 @@ static int hpc_query_power_fault(struct slot *slot)
500 508
501 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 509 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
502 if (retval) { 510 if (retval) {
503 err("%s: Cannot check for power fault\n", __func__); 511 ctrl_err(ctrl, "%s: Cannot check for power fault\n", __func__);
504 return retval; 512 return retval;
505 } 513 }
506 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 514 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
@@ -516,7 +524,7 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status)
516 524
517 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 525 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
518 if (retval) { 526 if (retval) {
519 err("%s : Cannot check EMI status\n", __func__); 527 ctrl_err(ctrl, "%s : Cannot check EMI status\n", __func__);
520 return retval; 528 return retval;
521 } 529 }
522 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; 530 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
@@ -560,8 +568,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
560 return -1; 568 return -1;
561 } 569 }
562 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 570 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
563 dbg("%s: SLOTCTRL %x write cmd %x\n", 571 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
564 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 572 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
565 573
566 return rc; 574 return rc;
567} 575}
@@ -575,8 +583,8 @@ static void hpc_set_green_led_on(struct slot *slot)
575 slot_cmd = 0x0100; 583 slot_cmd = 0x0100;
576 cmd_mask = PWR_LED_CTRL; 584 cmd_mask = PWR_LED_CTRL;
577 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 585 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
578 dbg("%s: SLOTCTRL %x write cmd %x\n", 586 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
579 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 587 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
580} 588}
581 589
582static void hpc_set_green_led_off(struct slot *slot) 590static void hpc_set_green_led_off(struct slot *slot)
@@ -588,8 +596,8 @@ static void hpc_set_green_led_off(struct slot *slot)
588 slot_cmd = 0x0300; 596 slot_cmd = 0x0300;
589 cmd_mask = PWR_LED_CTRL; 597 cmd_mask = PWR_LED_CTRL;
590 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 598 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
591 dbg("%s: SLOTCTRL %x write cmd %x\n", 599 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
592 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 600 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
593} 601}
594 602
595static void hpc_set_green_led_blink(struct slot *slot) 603static void hpc_set_green_led_blink(struct slot *slot)
@@ -601,8 +609,8 @@ static void hpc_set_green_led_blink(struct slot *slot)
601 slot_cmd = 0x0200; 609 slot_cmd = 0x0200;
602 cmd_mask = PWR_LED_CTRL; 610 cmd_mask = PWR_LED_CTRL;
603 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 611 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
604 dbg("%s: SLOTCTRL %x write cmd %x\n", 612 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
605 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 613 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
606} 614}
607 615
608static int hpc_power_on_slot(struct slot * slot) 616static int hpc_power_on_slot(struct slot * slot)
@@ -613,20 +621,22 @@ static int hpc_power_on_slot(struct slot * slot)
613 u16 slot_status; 621 u16 slot_status;
614 int retval = 0; 622 int retval = 0;
615 623
616 dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); 624 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
617 625
618 /* Clear sticky power-fault bit from previous power failures */ 626 /* Clear sticky power-fault bit from previous power failures */
619 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 627 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
620 if (retval) { 628 if (retval) {
621 err("%s: Cannot read SLOTSTATUS register\n", __func__); 629 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
630 __func__);
622 return retval; 631 return retval;
623 } 632 }
624 slot_status &= PWR_FAULT_DETECTED; 633 slot_status &= PWR_FAULT_DETECTED;
625 if (slot_status) { 634 if (slot_status) {
626 retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); 635 retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
627 if (retval) { 636 if (retval) {
628 err("%s: Cannot write to SLOTSTATUS register\n", 637 ctrl_err(ctrl,
629 __func__); 638 "%s: Cannot write to SLOTSTATUS register\n",
639 __func__);
630 return retval; 640 return retval;
631 } 641 }
632 } 642 }
@@ -644,11 +654,12 @@ static int hpc_power_on_slot(struct slot * slot)
644 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 654 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
645 655
646 if (retval) { 656 if (retval) {
647 err("%s: Write %x command failed!\n", __func__, slot_cmd); 657 ctrl_err(ctrl, "%s: Write %x command failed!\n",
658 __func__, slot_cmd);
648 return -1; 659 return -1;
649 } 660 }
650 dbg("%s: SLOTCTRL %x write cmd %x\n", 661 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
651 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 662 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
652 663
653 return retval; 664 return retval;
654} 665}
@@ -694,7 +705,7 @@ static int hpc_power_off_slot(struct slot * slot)
694 int retval = 0; 705 int retval = 0;
695 int changed; 706 int changed;
696 707
697 dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); 708 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
698 709
699 /* 710 /*
700 * Set Bad DLLP Mask bit in Correctable Error Mask 711 * Set Bad DLLP Mask bit in Correctable Error Mask
@@ -722,12 +733,12 @@ static int hpc_power_off_slot(struct slot * slot)
722 733
723 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 734 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
724 if (retval) { 735 if (retval) {
725 err("%s: Write command failed!\n", __func__); 736 ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
726 retval = -1; 737 retval = -1;
727 goto out; 738 goto out;
728 } 739 }
729 dbg("%s: SLOTCTRL %x write cmd %x\n", 740 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
730 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 741 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
731 out: 742 out:
732 if (changed) 743 if (changed)
733 pcie_unmask_bad_dllp(ctrl); 744 pcie_unmask_bad_dllp(ctrl);
@@ -749,7 +760,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
749 intr_loc = 0; 760 intr_loc = 0;
750 do { 761 do {
751 if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { 762 if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
752 err("%s: Cannot read SLOTSTATUS\n", __func__); 763 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
764 __func__);
753 return IRQ_NONE; 765 return IRQ_NONE;
754 } 766 }
755 767
@@ -760,12 +772,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
760 if (!intr_loc) 772 if (!intr_loc)
761 return IRQ_NONE; 773 return IRQ_NONE;
762 if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { 774 if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
763 err("%s: Cannot write to SLOTSTATUS\n", __func__); 775 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
776 __func__);
764 return IRQ_NONE; 777 return IRQ_NONE;
765 } 778 }
766 } while (detected); 779 } while (detected);
767 780
768 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); 781 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
769 782
770 /* Check Command Complete Interrupt Pending */ 783 /* Check Command Complete Interrupt Pending */
771 if (intr_loc & CMD_COMPLETED) { 784 if (intr_loc & CMD_COMPLETED) {
@@ -807,7 +820,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
807 820
808 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 821 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
809 if (retval) { 822 if (retval) {
810 err("%s: Cannot read LNKCAP register\n", __func__); 823 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
811 return retval; 824 return retval;
812 } 825 }
813 826
@@ -821,7 +834,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
821 } 834 }
822 835
823 *value = lnk_speed; 836 *value = lnk_speed;
824 dbg("Max link speed = %d\n", lnk_speed); 837 ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed);
825 838
826 return retval; 839 return retval;
827} 840}
@@ -836,7 +849,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
836 849
837 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 850 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
838 if (retval) { 851 if (retval) {
839 err("%s: Cannot read LNKCAP register\n", __func__); 852 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
840 return retval; 853 return retval;
841 } 854 }
842 855
@@ -871,7 +884,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
871 } 884 }
872 885
873 *value = lnk_wdth; 886 *value = lnk_wdth;
874 dbg("Max link width = %d\n", lnk_wdth); 887 ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth);
875 888
876 return retval; 889 return retval;
877} 890}
@@ -885,7 +898,8 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
885 898
886 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 899 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
887 if (retval) { 900 if (retval) {
888 err("%s: Cannot read LNKSTATUS register\n", __func__); 901 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
902 __func__);
889 return retval; 903 return retval;
890 } 904 }
891 905
@@ -899,7 +913,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
899 } 913 }
900 914
901 *value = lnk_speed; 915 *value = lnk_speed;
902 dbg("Current link speed = %d\n", lnk_speed); 916 ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed);
903 917
904 return retval; 918 return retval;
905} 919}
@@ -914,7 +928,8 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
914 928
915 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 929 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
916 if (retval) { 930 if (retval) {
917 err("%s: Cannot read LNKSTATUS register\n", __func__); 931 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
932 __func__);
918 return retval; 933 return retval;
919 } 934 }
920 935
@@ -949,7 +964,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
949 } 964 }
950 965
951 *value = lnk_wdth; 966 *value = lnk_wdth;
952 dbg("Current link width = %d\n", lnk_wdth); 967 ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth);
953 968
954 return retval; 969 return retval;
955} 970}
@@ -998,7 +1013,8 @@ int pcie_enable_notification(struct controller *ctrl)
998 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1013 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
999 1014
1000 if (pcie_write_cmd(ctrl, cmd, mask)) { 1015 if (pcie_write_cmd(ctrl, cmd, mask)) {
1001 err("%s: Cannot enable software notification\n", __func__); 1016 ctrl_err(ctrl, "%s: Cannot enable software notification\n",
1017 __func__);
1002 return -1; 1018 return -1;
1003 } 1019 }
1004 return 0; 1020 return 0;
@@ -1010,7 +1026,8 @@ static void pcie_disable_notification(struct controller *ctrl)
1010 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | 1026 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1011 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1027 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1012 if (pcie_write_cmd(ctrl, 0, mask)) 1028 if (pcie_write_cmd(ctrl, 0, mask))
1013 warn("%s: Cannot disable software notification\n", __func__); 1029 ctrl_warn(ctrl, "%s: Cannot disable software notification\n",
1030 __func__);
1014} 1031}
1015 1032
1016static int pcie_init_notification(struct controller *ctrl) 1033static int pcie_init_notification(struct controller *ctrl)
@@ -1071,34 +1088,45 @@ static inline void dbg_ctrl(struct controller *ctrl)
1071 if (!pciehp_debug) 1088 if (!pciehp_debug)
1072 return; 1089 return;
1073 1090
1074 dbg("Hotplug Controller:\n"); 1091 ctrl_info(ctrl, "Hotplug Controller:\n");
1075 dbg(" Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq); 1092 ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
1076 dbg(" Vendor ID : 0x%04x\n", pdev->vendor); 1093 pci_name(pdev), pdev->irq);
1077 dbg(" Device ID : 0x%04x\n", pdev->device); 1094 ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
1078 dbg(" Subsystem ID : 0x%04x\n", pdev->subsystem_device); 1095 ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
1079 dbg(" Subsystem Vendor ID : 0x%04x\n", pdev->subsystem_vendor); 1096 ctrl_info(ctrl, " Subsystem ID : 0x%04x\n",
1080 dbg(" PCIe Cap offset : 0x%02x\n", ctrl->cap_base); 1097 pdev->subsystem_device);
1098 ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
1099 pdev->subsystem_vendor);
1100 ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base);
1081 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1101 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1082 if (!pci_resource_len(pdev, i)) 1102 if (!pci_resource_len(pdev, i))
1083 continue; 1103 continue;
1084 dbg(" PCI resource [%d] : 0x%llx@0x%llx\n", i, 1104 ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n",
1085 (unsigned long long)pci_resource_len(pdev, i), 1105 i, (unsigned long long)pci_resource_len(pdev, i),
1086 (unsigned long long)pci_resource_start(pdev, i)); 1106 (unsigned long long)pci_resource_start(pdev, i));
1087 } 1107 }
1088 dbg("Slot Capabilities : 0x%08x\n", ctrl->slot_cap); 1108 ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
1089 dbg(" Physical Slot Number : %d\n", ctrl->first_slot); 1109 ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot);
1090 dbg(" Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no"); 1110 ctrl_info(ctrl, " Attention Button : %3s\n",
1091 dbg(" Power Controller : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no"); 1111 ATTN_BUTTN(ctrl) ? "yes" : "no");
1092 dbg(" MRL Sensor : %3s\n", MRL_SENS(ctrl) ? "yes" : "no"); 1112 ctrl_info(ctrl, " Power Controller : %3s\n",
1093 dbg(" Attention Indicator : %3s\n", ATTN_LED(ctrl) ? "yes" : "no"); 1113 POWER_CTRL(ctrl) ? "yes" : "no");
1094 dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); 1114 ctrl_info(ctrl, " MRL Sensor : %3s\n",
1095 dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); 1115 MRL_SENS(ctrl) ? "yes" : "no");
1096 dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); 1116 ctrl_info(ctrl, " Attention Indicator : %3s\n",
1097 dbg(" Command Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); 1117 ATTN_LED(ctrl) ? "yes" : "no");
1118 ctrl_info(ctrl, " Power Indicator : %3s\n",
1119 PWR_LED(ctrl) ? "yes" : "no");
1120 ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
1121 HP_SUPR_RM(ctrl) ? "yes" : "no");
1122 ctrl_info(ctrl, " EMI Present : %3s\n",
1123 EMI(ctrl) ? "yes" : "no");
1124 ctrl_info(ctrl, " Command Completed : %3s\n",
1125 NO_CMD_CMPL(ctrl) ? "no" : "yes");
1098 pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1126 pciehp_readw(ctrl, SLOTSTATUS, &reg16);
1099 dbg("Slot Status : 0x%04x\n", reg16); 1127 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
1100 pciehp_readw(ctrl, SLOTCTRL, &reg16); 1128 pciehp_readw(ctrl, SLOTCTRL, &reg16);
1101 dbg("Slot Control : 0x%04x\n", reg16); 1129 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
1102} 1130}
1103 1131
1104struct controller *pcie_init(struct pcie_device *dev) 1132struct controller *pcie_init(struct pcie_device *dev)
@@ -1109,19 +1137,21 @@ struct controller *pcie_init(struct pcie_device *dev)
1109 1137
1110 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1138 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1111 if (!ctrl) { 1139 if (!ctrl) {
1112 err("%s : out of memory\n", __func__); 1140 dev_err(&dev->device, "%s : out of memory\n", __func__);
1113 goto abort; 1141 goto abort;
1114 } 1142 }
1115 INIT_LIST_HEAD(&ctrl->slot_list); 1143 INIT_LIST_HEAD(&ctrl->slot_list);
1116 1144
1145 ctrl->pcie = dev;
1117 ctrl->pci_dev = pdev; 1146 ctrl->pci_dev = pdev;
1118 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1147 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1119 if (!ctrl->cap_base) { 1148 if (!ctrl->cap_base) {
1120 err("%s: Cannot find PCI Express capability\n", __func__); 1149 ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n",
1150 __func__);
1121 goto abort; 1151 goto abort;
1122 } 1152 }
1123 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { 1153 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
1124 err("%s: Cannot read SLOTCAP register\n", __func__); 1154 ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__);
1125 goto abort; 1155 goto abort;
1126 } 1156 }
1127 1157
@@ -1161,9 +1191,9 @@ struct controller *pcie_init(struct pcie_device *dev)
1161 goto abort_ctrl; 1191 goto abort_ctrl;
1162 } 1192 }
1163 1193
1164 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 1194 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1165 pdev->vendor, pdev->device, 1195 pdev->vendor, pdev->device, pdev->subsystem_vendor,
1166 pdev->subsystem_vendor, pdev->subsystem_device); 1196 pdev->subsystem_device);
1167 1197
1168 if (pcie_init_slot(ctrl)) 1198 if (pcie_init_slot(ctrl))
1169 goto abort_ctrl; 1199 goto abort_ctrl;
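Besides the logging conversion, pciehp_hpc.c now reads the IRQ from ctrl->pcie->irq instead of ctrl->pci_dev->irq, and pcie_init() records the pcie_device in the new ctrl->pcie field. Under the PCI Express port driver each port service owns a struct pcie_device whose irq can differ from the port function's pci_dev->irq (for example when MSI/MSI-X vectors are assigned per service), so the service must use its own vector. A hedged sketch of the request/free pairing, with helper names invented for illustration:

/* Sketch only; mirrors the pciehp_request_irq()/pciehp_free_irq() hunks. */
static int hp_irq_request(struct controller *ctrl)
{
	/* ctrl->pcie was saved in pcie_init(); its ->irq is the vector
	 * the port driver assigned to this hotplug service. */
	return request_irq(ctrl->pcie->irq, pcie_isr, IRQF_SHARED,
			   MY_NAME, ctrl);
}

static void hp_irq_free(struct controller *ctrl)
{
	free_irq(ctrl->pcie->irq, ctrl);
}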
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 6040dcceb25..ffd11148fbe 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -198,18 +198,20 @@ int pciehp_configure_device(struct slot *p_slot)
198 struct pci_dev *dev; 198 struct pci_dev *dev;
199 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 199 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
200 int num, fn; 200 int num, fn;
201 struct controller *ctrl = p_slot->ctrl;
201 202
202 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); 203 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
203 if (dev) { 204 if (dev) {
204 err("Device %s already exists at %x:%x, cannot hot-add\n", 205 ctrl_err(ctrl,
205 pci_name(dev), p_slot->bus, p_slot->device); 206 "Device %s already exists at %x:%x, cannot hot-add\n",
207 pci_name(dev), p_slot->bus, p_slot->device);
206 pci_dev_put(dev); 208 pci_dev_put(dev);
207 return -EINVAL; 209 return -EINVAL;
208 } 210 }
209 211
210 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); 212 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
211 if (num == 0) { 213 if (num == 0) {
212 err("No new device found\n"); 214 ctrl_err(ctrl, "No new device found\n");
213 return -ENODEV; 215 return -ENODEV;
214 } 216 }
215 217
@@ -218,8 +220,8 @@ int pciehp_configure_device(struct slot *p_slot)
218 if (!dev) 220 if (!dev)
219 continue; 221 continue;
220 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 222 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
221 err("Cannot hot-add display device %s\n", 223 ctrl_err(ctrl, "Cannot hot-add display device %s\n",
222 pci_name(dev)); 224 pci_name(dev));
223 pci_dev_put(dev); 225 pci_dev_put(dev);
224 continue; 226 continue;
225 } 227 }
@@ -244,9 +246,10 @@ int pciehp_unconfigure_device(struct slot *p_slot)
244 u8 presence = 0; 246 u8 presence = 0;
245 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 247 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
246 u16 command; 248 u16 command;
249 struct controller *ctrl = p_slot->ctrl;
247 250
248 dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, 251 ctrl_dbg(ctrl, "%s: bus/dev = %x/%x\n", __func__,
249 p_slot->device); 252 p_slot->bus, p_slot->device);
250 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); 253 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence);
251 if (ret) 254 if (ret)
252 presence = 0; 255 presence = 0;
@@ -257,16 +260,17 @@ int pciehp_unconfigure_device(struct slot *p_slot)
257 if (!temp) 260 if (!temp)
258 continue; 261 continue;
259 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 262 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
260 err("Cannot remove display device %s\n", 263 ctrl_err(ctrl, "Cannot remove display device %s\n",
261 pci_name(temp)); 264 pci_name(temp));
262 pci_dev_put(temp); 265 pci_dev_put(temp);
263 continue; 266 continue;
264 } 267 }
265 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { 268 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
266 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); 269 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
267 if (bctl & PCI_BRIDGE_CTL_VGA) { 270 if (bctl & PCI_BRIDGE_CTL_VGA) {
268 err("Cannot remove display device %s\n", 271 ctrl_err(ctrl,
269 pci_name(temp)); 272 "Cannot remove display device %s\n",
273 pci_name(temp));
270 pci_dev_put(temp); 274 pci_dev_put(temp);
271 continue; 275 continue;
272 } 276 }
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 7d5921b1ee7..419919a87b0 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -46,10 +46,10 @@
46#define PRESENT 1 /* Card in slot */ 46#define PRESENT 1 /* Card in slot */
47 47
48#define MY_NAME "rpaphp" 48#define MY_NAME "rpaphp"
49extern int debug; 49extern int rpaphp_debug;
50#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
51 do { \ 51 do { \
52 if (debug) \ 52 if (rpaphp_debug) \
53 printk(KERN_DEBUG "%s: " format, \ 53 printk(KERN_DEBUG "%s: " format, \
54 MY_NAME , ## arg); \ 54 MY_NAME , ## arg); \
55 } while (0) 55 } while (0)
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 1f84f402acd..95d02a08fdc 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -37,7 +37,7 @@
37 /* and pci_do_scan_bus */ 37 /* and pci_do_scan_bus */
38#include "rpaphp.h" 38#include "rpaphp.h"
39 39
40int debug; 40int rpaphp_debug;
41LIST_HEAD(rpaphp_slot_head); 41LIST_HEAD(rpaphp_slot_head);
42 42
43#define DRIVER_VERSION "0.1" 43#define DRIVER_VERSION "0.1"
@@ -50,7 +50,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
50MODULE_DESCRIPTION(DRIVER_DESC); 50MODULE_DESCRIPTION(DRIVER_DESC);
51MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
52 52
53module_param(debug, bool, 0644); 53module_param_named(debug, rpaphp_debug, bool, 0644);
54 54
55/** 55/**
56 * set_attention_status - set attention LED 56 * set_attention_status - set attention LED
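The rpaphp hunks rename the driver's global debug flag to rpaphp_debug so the symbol cannot clash with other code when the driver is built into the kernel, while module_param_named() keeps the user-visible parameter name unchanged. A short usage sketch:

/* The C symbol is rpaphp_debug, but the parameter is still "debug":
 * users keep writing "modprobe rpaphp debug=1", and the flag appears
 * as /sys/module/rpaphp/parameters/debug. */
int rpaphp_debug;
module_param_named(debug, rpaphp_debug, bool, 0644);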
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 5acfd4f3d4c..513e1e28239 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -123,7 +123,7 @@ int rpaphp_enable_slot(struct slot *slot)
123 slot->state = CONFIGURED; 123 slot->state = CONFIGURED;
124 } 124 }
125 125
126 if (debug) { 126 if (rpaphp_debug) {
127 struct pci_dev *dev; 127 struct pci_dev *dev;
128 dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); 128 dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name);
129 list_for_each_entry (dev, &bus->devices, bus_list) 129 list_for_each_entry (dev, &bus->devices, bus_list)
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 9b714ea93d2..50884507b8b 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -147,9 +147,5 @@ int rpaphp_register_slot(struct slot *slot)
147 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); 147 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head);
148 info("Slot [%s] registered\n", slot->name); 148 info("Slot [%s] registered\n", slot->name);
149 return 0; 149 return 0;
150
151sysfs_fail:
152 pci_hp_deregister(php_slot);
153 return retval;
154} 150}
155 151
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 279c940a003..bf7d6ce9bbb 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
126 cfg->msg.address_hi = 0xffffffff; 126 cfg->msg.address_hi = 0xffffffff;
127 127
128 irq = create_irq(); 128 irq = create_irq();
129 if (irq < 0) { 129
130 if (irq <= 0) {
130 kfree(cfg); 131 kfree(cfg);
131 return -EBUSY; 132 return -EBUSY;
132 } 133 }
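The htirq fix tightens the error check after create_irq(): an IRQ number of 0 is not usable for a HyperTransport interrupt, so it is now rejected together with negative returns. Sketch of the corrected pattern from the hunk above:

irq = create_irq();
if (irq <= 0) {		/* 0 and negative both mean no usable vector */
	kfree(cfg);
	return -EBUSY;
}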
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3edcdc08e7..8b51e10b778 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -33,8 +33,8 @@
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/mempool.h> 34#include <linux/mempool.h>
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include "iova.h" 36#include <linux/iova.h>
37#include "intel-iommu.h" 37#include <linux/intel-iommu.h>
38#include <asm/proto.h> /* force_iommu in this header in x86-64*/ 38#include <asm/proto.h> /* force_iommu in this header in x86-64*/
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40#include <asm/iommu.h> 40#include <asm/iommu.h>
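The iova.h and intel-iommu.h headers move out of drivers/pci into include/linux, so code elsewhere in the tree can share the IOMMU and IOVA declarations through ordinary system includes:

/* After this change, any kernel code may simply write: */
#include <linux/iova.h>
#include <linux/intel-iommu.h>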
@@ -49,8 +49,6 @@
49 49
50#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 50#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
51 51
52#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
53
54#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 52#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
55 53
56 54
@@ -58,8 +56,6 @@ static void flush_unmaps_timeout(unsigned long data);
58 56
59DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); 57DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
60 58
61static struct intel_iommu *g_iommus;
62
63#define HIGH_WATER_MARK 250 59#define HIGH_WATER_MARK 250
64struct deferred_flush_tables { 60struct deferred_flush_tables {
65 int next; 61 int next;
@@ -80,7 +76,7 @@ static long list_size;
80 76
81static void domain_remove_dev_info(struct dmar_domain *domain); 77static void domain_remove_dev_info(struct dmar_domain *domain);
82 78
83static int dmar_disabled; 79int dmar_disabled;
84static int __initdata dmar_map_gfx = 1; 80static int __initdata dmar_map_gfx = 1;
85static int dmar_forcedac; 81static int dmar_forcedac;
86static int intel_iommu_strict; 82static int intel_iommu_strict;
@@ -160,7 +156,7 @@ static inline void *alloc_domain_mem(void)
160 return iommu_kmem_cache_alloc(iommu_domain_cache); 156 return iommu_kmem_cache_alloc(iommu_domain_cache);
161} 157}
162 158
163static inline void free_domain_mem(void *vaddr) 159static void free_domain_mem(void *vaddr)
164{ 160{
165 kmem_cache_free(iommu_domain_cache, vaddr); 161 kmem_cache_free(iommu_domain_cache, vaddr);
166} 162}
@@ -185,13 +181,6 @@ void free_iova_mem(struct iova *iova)
185 kmem_cache_free(iommu_iova_cache, iova); 181 kmem_cache_free(iommu_iova_cache, iova);
186} 182}
187 183
188static inline void __iommu_flush_cache(
189 struct intel_iommu *iommu, void *addr, int size)
190{
191 if (!ecap_coherent(iommu->ecap))
192 clflush_cache_range(addr, size);
193}
194
195/* Gets context entry for a given bus and devfn */ 184/* Gets context entry for a given bus and devfn */
196static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, 185static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
197 u8 bus, u8 devfn) 186 u8 bus, u8 devfn)
@@ -488,19 +477,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
488 return 0; 477 return 0;
489} 478}
490 479
491#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
492{\
493 cycles_t start_time = get_cycles();\
494 while (1) {\
495 sts = op (iommu->reg + offset);\
496 if (cond)\
497 break;\
498 if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
499 panic("DMAR hardware is malfunctioning\n");\
500 cpu_relax();\
501 }\
502}
503
504static void iommu_set_root_entry(struct intel_iommu *iommu) 480static void iommu_set_root_entry(struct intel_iommu *iommu)
505{ 481{
506 void *addr; 482 void *addr;
@@ -587,7 +563,7 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
587 563
588 spin_unlock_irqrestore(&iommu->register_lock, flag); 564 spin_unlock_irqrestore(&iommu->register_lock, flag);
589 565
590 /* flush context entry will implictly flush write buffer */ 566 /* flush context entry will implicitly flush write buffer */
591 return 0; 567 return 0;
592} 568}
593 569
@@ -680,7 +656,7 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
680 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) 656 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
681 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", 657 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
682 DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); 658 DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
683 /* flush context entry will implictly flush write buffer */ 659 /* flush iotlb entry will implicitly flush write buffer */
684 return 0; 660 return 0;
685} 661}
686 662
@@ -990,6 +966,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
990 return -ENOMEM; 966 return -ENOMEM;
991 } 967 }
992 968
969 spin_lock_init(&iommu->lock);
970
993 /* 971 /*
994 * if Caching mode is set, then invalid translations are tagged 972 * if Caching mode is set, then invalid translations are tagged
995 * with domainid 0. Hence we need to pre-allocate it. 973 * with domainid 0. Hence we need to pre-allocate it.
@@ -998,62 +976,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
998 set_bit(0, iommu->domain_ids); 976 set_bit(0, iommu->domain_ids);
999 return 0; 977 return 0;
1000} 978}
1001static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
1002 struct dmar_drhd_unit *drhd)
1003{
1004 int ret;
1005 int map_size;
1006 u32 ver;
1007
1008 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
1009 if (!iommu->reg) {
1010 printk(KERN_ERR "IOMMU: can't map the region\n");
1011 goto error;
1012 }
1013 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
1014 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
1015
1016 /* the registers might be more than one page */
1017 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
1018 cap_max_fault_reg_offset(iommu->cap));
1019 map_size = PAGE_ALIGN_4K(map_size);
1020 if (map_size > PAGE_SIZE_4K) {
1021 iounmap(iommu->reg);
1022 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
1023 if (!iommu->reg) {
1024 printk(KERN_ERR "IOMMU: can't map the region\n");
1025 goto error;
1026 }
1027 }
1028
1029 ver = readl(iommu->reg + DMAR_VER_REG);
1030 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
1031 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1032 iommu->cap, iommu->ecap);
1033 ret = iommu_init_domains(iommu);
1034 if (ret)
1035 goto error_unmap;
1036 spin_lock_init(&iommu->lock);
1037 spin_lock_init(&iommu->register_lock);
1038 979
1039 drhd->iommu = iommu;
1040 return iommu;
1041error_unmap:
1042 iounmap(iommu->reg);
1043error:
1044 kfree(iommu);
1045 return NULL;
1046}
1047 980
1048static void domain_exit(struct dmar_domain *domain); 981static void domain_exit(struct dmar_domain *domain);
1049static void free_iommu(struct intel_iommu *iommu) 982
983void free_dmar_iommu(struct intel_iommu *iommu)
1050{ 984{
1051 struct dmar_domain *domain; 985 struct dmar_domain *domain;
1052 int i; 986 int i;
1053 987
1054 if (!iommu)
1055 return;
1056
1057 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); 988 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1058 for (; i < cap_ndoms(iommu->cap); ) { 989 for (; i < cap_ndoms(iommu->cap); ) {
1059 domain = iommu->domains[i]; 990 domain = iommu->domains[i];
@@ -1078,10 +1009,6 @@ static void free_iommu(struct intel_iommu *iommu)
1078 1009
1079 /* free context mapping */ 1010 /* free context mapping */
1080 free_context_table(iommu); 1011 free_context_table(iommu);
1081
1082 if (iommu->reg)
1083 iounmap(iommu->reg);
1084 kfree(iommu);
1085} 1012}
1086 1013
1087static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) 1014static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
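alloc_iommu() and free_iommu() no longer live in this file; the hunk keeps only the domain-level teardown, renamed free_dmar_iommu(), while register mapping and allocation are presumably performed when the DRHD units are parsed (in dmar.c, outside this section). A hypothetical sketch of the allocation side, assuming each IOMMU is given a stable seq_id that later indexes per-IOMMU tables:

/* Hypothetical sketch only; the real code lives in dmar.c. */
static int iommu_allocated;

struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

	if (!iommu)
		return NULL;

	iommu->seq_id = iommu_allocated++;	/* stable per-IOMMU index */
	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
	if (!iommu->reg) {
		kfree(iommu);
		return NULL;
	}
	spin_lock_init(&iommu->register_lock);
	drhd->iommu = iommu;
	return iommu;
}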
@@ -1414,7 +1341,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
1414 * find_domain 1341 * find_domain
1415 * Note: we use struct pci_dev->dev.archdata.iommu stores the info 1342 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
1416 */ 1343 */
1417struct dmar_domain * 1344static struct dmar_domain *
1418find_domain(struct pci_dev *pdev) 1345find_domain(struct pci_dev *pdev)
1419{ 1346{
1420 struct device_domain_info *info; 1347 struct device_domain_info *info;
@@ -1426,37 +1353,6 @@ find_domain(struct pci_dev *pdev)
1426 return NULL; 1353 return NULL;
1427} 1354}
1428 1355
1429static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
1430 struct pci_dev *dev)
1431{
1432 int index;
1433
1434 while (dev) {
1435 for (index = 0; index < cnt; index++)
1436 if (dev == devices[index])
1437 return 1;
1438
1439 /* Check our parent */
1440 dev = dev->bus->self;
1441 }
1442
1443 return 0;
1444}
1445
1446static struct dmar_drhd_unit *
1447dmar_find_matched_drhd_unit(struct pci_dev *dev)
1448{
1449 struct dmar_drhd_unit *drhd = NULL;
1450
1451 list_for_each_entry(drhd, &dmar_drhd_units, list) {
1452 if (drhd->include_all || dmar_pci_device_match(drhd->devices,
1453 drhd->devices_cnt, dev))
1454 return drhd;
1455 }
1456
1457 return NULL;
1458}
1459
1460/* domain is initialized */ 1356/* domain is initialized */
1461static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) 1357static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1462{ 1358{
@@ -1729,8 +1625,6 @@ int __init init_dmars(void)
1729 * endfor 1625 * endfor
1730 */ 1626 */
1731 for_each_drhd_unit(drhd) { 1627 for_each_drhd_unit(drhd) {
1732 if (drhd->ignored)
1733 continue;
1734 g_num_of_iommus++; 1628 g_num_of_iommus++;
1735 /* 1629 /*
1736 * lock not needed as this is only incremented in the single 1630 * lock not needed as this is only incremented in the single
@@ -1739,12 +1633,6 @@ int __init init_dmars(void)
1739 */ 1633 */
1740 } 1634 }
1741 1635
1742 g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
1743 if (!g_iommus) {
1744 ret = -ENOMEM;
1745 goto error;
1746 }
1747
1748 deferred_flush = kzalloc(g_num_of_iommus * 1636 deferred_flush = kzalloc(g_num_of_iommus *
1749 sizeof(struct deferred_flush_tables), GFP_KERNEL); 1637 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1750 if (!deferred_flush) { 1638 if (!deferred_flush) {
@@ -1752,16 +1640,15 @@ int __init init_dmars(void)
1752 goto error; 1640 goto error;
1753 } 1641 }
1754 1642
1755 i = 0;
1756 for_each_drhd_unit(drhd) { 1643 for_each_drhd_unit(drhd) {
1757 if (drhd->ignored) 1644 if (drhd->ignored)
1758 continue; 1645 continue;
1759 iommu = alloc_iommu(&g_iommus[i], drhd); 1646
1760 i++; 1647 iommu = drhd->iommu;
1761 if (!iommu) { 1648
1762 ret = -ENOMEM; 1649 ret = iommu_init_domains(iommu);
1650 if (ret)
1763 goto error; 1651 goto error;
1764 }
1765 1652
1766 /* 1653 /*
1767 * TBD: 1654 * TBD:
@@ -1845,7 +1732,6 @@ error:
1845 iommu = drhd->iommu; 1732 iommu = drhd->iommu;
1846 free_iommu(iommu); 1733 free_iommu(iommu);
1847 } 1734 }
1848 kfree(g_iommus);
1849 return ret; 1735 return ret;
1850} 1736}
1851 1737
@@ -2002,7 +1888,10 @@ static void flush_unmaps(void)
2002 /* just flush them all */ 1888 /* just flush them all */
2003 for (i = 0; i < g_num_of_iommus; i++) { 1889 for (i = 0; i < g_num_of_iommus; i++) {
2004 if (deferred_flush[i].next) { 1890 if (deferred_flush[i].next) {
2005 iommu_flush_iotlb_global(&g_iommus[i], 0); 1891 struct intel_iommu *iommu =
1892 deferred_flush[i].domain[0]->iommu;
1893
1894 iommu_flush_iotlb_global(iommu, 0);
2006 for (j = 0; j < deferred_flush[i].next; j++) { 1895 for (j = 0; j < deferred_flush[i].next; j++) {
2007 __free_iova(&deferred_flush[i].domain[j]->iovad, 1896 __free_iova(&deferred_flush[i].domain[j]->iovad,
2008 deferred_flush[i].iova[j]); 1897 deferred_flush[i].iova[j]);
@@ -2032,7 +1921,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2032 if (list_size == HIGH_WATER_MARK) 1921 if (list_size == HIGH_WATER_MARK)
2033 flush_unmaps(); 1922 flush_unmaps();
2034 1923
2035 iommu_id = dom->iommu - g_iommus; 1924 iommu_id = dom->iommu->seq_id;
1925
2036 next = deferred_flush[iommu_id].next; 1926 next = deferred_flush[iommu_id].next;
2037 deferred_flush[iommu_id].domain[next] = dom; 1927 deferred_flush[iommu_id].domain[next] = dom;
2038 deferred_flush[iommu_id].iova[next] = iova; 1928 deferred_flush[iommu_id].iova[next] = iova;
@@ -2348,38 +2238,6 @@ static void __init iommu_exit_mempool(void)
2348 2238
2349} 2239}
2350 2240
2351static int blacklist_iommu(const struct dmi_system_id *id)
2352{
2353 printk(KERN_INFO "%s detected; disabling IOMMU\n",
2354 id->ident);
2355 dmar_disabled = 1;
2356 return 0;
2357}
2358
2359static struct dmi_system_id __initdata intel_iommu_dmi_table[] = {
2360 { /* Some DG33BU BIOS revisions advertised non-existent VT-d */
2361 .callback = blacklist_iommu,
2362 .ident = "Intel DG33BU",
2363 { DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
2364 DMI_MATCH(DMI_BOARD_NAME, "DG33BU"),
2365 }
2366 },
2367 { }
2368};
2369
2370
2371void __init detect_intel_iommu(void)
2372{
2373 if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
2374 return;
2375 if (early_dmar_detect()) {
2376 dmi_check_system(intel_iommu_dmi_table);
2377 if (dmar_disabled)
2378 return;
2379 iommu_detected = 1;
2380 }
2381}
2382
2383static void __init init_no_remapping_devices(void) 2241static void __init init_no_remapping_devices(void)
2384{ 2242{
2385 struct dmar_drhd_unit *drhd; 2243 struct dmar_drhd_unit *drhd;
@@ -2426,12 +2284,19 @@ int __init intel_iommu_init(void)
2426{ 2284{
2427 int ret = 0; 2285 int ret = 0;
2428 2286
2429 if (no_iommu || swiotlb || dmar_disabled)
2430 return -ENODEV;
2431
2432 if (dmar_table_init()) 2287 if (dmar_table_init())
2433 return -ENODEV; 2288 return -ENODEV;
2434 2289
2290 if (dmar_dev_scope_init())
2291 return -ENODEV;
2292
2293 /*
2294 * Check the need for DMA-remapping initialization now.
2295 * Above initialization will also be used by Interrupt-remapping.
2296 */
2297 if (no_iommu || swiotlb || dmar_disabled)
2298 return -ENODEV;
2299
2435 iommu_init_mempool(); 2300 iommu_init_mempool();
2436 dmar_init_reserved_ranges(); 2301 dmar_init_reserved_ranges();
2437 2302
@@ -2453,3 +2318,111 @@ int __init intel_iommu_init(void)
2453 return 0; 2318 return 0;
2454} 2319}
2455 2320
2321void intel_iommu_domain_exit(struct dmar_domain *domain)
2322{
2323 u64 end;
2324
2325 /* Domain 0 is reserved, so don't process it */
2326 if (!domain)
2327 return;
2328
2329 end = DOMAIN_MAX_ADDR(domain->gaw);
2330 end = end & (~PAGE_MASK_4K);
2331
2332 /* clear ptes */
2333 dma_pte_clear_range(domain, 0, end);
2334
2335 /* free page tables */
2336 dma_pte_free_pagetable(domain, 0, end);
2337
2338 iommu_free_domain(domain);
2339 free_domain_mem(domain);
2340}
2341EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
2342
2343struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
2344{
2345 struct dmar_drhd_unit *drhd;
2346 struct dmar_domain *domain;
2347 struct intel_iommu *iommu;
2348
2349 drhd = dmar_find_matched_drhd_unit(pdev);
2350 if (!drhd) {
2351 printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
2352 return NULL;
2353 }
2354
2355 iommu = drhd->iommu;
2356 if (!iommu) {
2357 printk(KERN_ERR
2358 "intel_iommu_domain_alloc: iommu == NULL\n");
2359 return NULL;
2360 }
2361 domain = iommu_alloc_domain(iommu);
2362 if (!domain) {
2363 printk(KERN_ERR
2364 "intel_iommu_domain_alloc: domain == NULL\n");
2365 return NULL;
2366 }
2367 if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2368 printk(KERN_ERR
2369 "intel_iommu_domain_alloc: domain_init() failed\n");
2370 intel_iommu_domain_exit(domain);
2371 return NULL;
2372 }
2373 return domain;
2374}
2375EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
2376
2377int intel_iommu_context_mapping(
2378 struct dmar_domain *domain, struct pci_dev *pdev)
2379{
2380 int rc;
2381 rc = domain_context_mapping(domain, pdev);
2382 return rc;
2383}
2384EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
2385
2386int intel_iommu_page_mapping(
2387 struct dmar_domain *domain, dma_addr_t iova,
2388 u64 hpa, size_t size, int prot)
2389{
2390 int rc;
2391 rc = domain_page_mapping(domain, iova, hpa, size, prot);
2392 return rc;
2393}
2394EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
2395
2396void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
2397{
2398 detach_domain_for_dev(domain, bus, devfn);
2399}
2400EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
2401
2402struct dmar_domain *
2403intel_iommu_find_domain(struct pci_dev *pdev)
2404{
2405 return find_domain(pdev);
2406}
2407EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
2408
2409int intel_iommu_found(void)
2410{
2411 return g_num_of_iommus;
2412}
2413EXPORT_SYMBOL_GPL(intel_iommu_found);
2414
2415u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
2416{
2417 struct dma_pte *pte;
2418 u64 pfn;
2419
2420 pfn = 0;
2421 pte = addr_to_dma_pte(domain, iova);
2422
2423 if (pte)
2424 pfn = dma_pte_addr(*pte);
2425
2426 return pfn >> PAGE_SHIFT_4K;
2427}
2428EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
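The exported wrappers above are the entire surface a consumer of this API (a device-assignment module, for instance) would touch. A minimal sketch of such a consumer follows; only the intel_iommu_* calls and the DMA_PTE_* flags come from this patch, while the function and its parameters are hypothetical:

    /* Hypothetical consumer of the API exported above (sketch only). */
    static int assign_device(struct pci_dev *pdev, u64 iova, u64 hpa, size_t size)
    {
            struct dmar_domain *domain;
            int ret;

            domain = intel_iommu_domain_alloc(pdev);        /* DRHD lookup + domain_init() */
            if (!domain)
                    return -ENODEV;

            ret = intel_iommu_context_mapping(domain, pdev); /* install root/context entries */
            if (!ret)
                    ret = intel_iommu_page_mapping(domain, iova, hpa, size,
                                                   DMA_PTE_READ | DMA_PTE_WRITE);
            if (ret)
                    intel_iommu_domain_exit(domain);
            return ret;
    }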
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
deleted file mode 100644
index afc0ad96122..00000000000
--- a/drivers/pci/intel-iommu.h
+++ /dev/null
@@ -1,344 +0,0 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 */
21
22#ifndef _INTEL_IOMMU_H_
23#define _INTEL_IOMMU_H_
24
25#include <linux/types.h>
26#include <linux/msi.h>
27#include <linux/sysdev.h>
28#include "iova.h"
29#include <linux/io.h>
30
31/*
32 * We need a fixed PAGE_SIZE of 4K irrespective of
33 * arch PAGE_SIZE for IOMMU page tables.
34 */
35#define PAGE_SHIFT_4K (12)
36#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
37#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
38#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
39
40#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
41#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
42#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
43
44/*
45 * Intel IOMMU register specification per version 1.0 public spec.
46 */
47
48#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
49#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
50#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
51#define DMAR_GCMD_REG 0x18 /* Global command register */
52#define DMAR_GSTS_REG 0x1c /* Global status register */
53#define DMAR_RTADDR_REG 0x20 /* Root entry table */
54#define DMAR_CCMD_REG 0x28 /* Context command reg */
55#define DMAR_FSTS_REG 0x34 /* Fault Status register */
56#define DMAR_FECTL_REG 0x38 /* Fault control register */
57#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
58#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
59#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
60#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
61#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
62#define DMAR_PLMBASE_REG 0x68 /* PMRR low base addr */
63#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
64#define DMAR_PHMBASE_REG 0x70 /* PMRR high base addr */
65#define DMAR_PHMLIMIT_REG 0x78 /* PMRR high limit */
66
67#define OFFSET_STRIDE (9)
68/*
69#define dmar_readl(dmar, reg) readl(dmar + reg)
70#define dmar_readq(dmar, reg) ({ \
71 u32 lo, hi; \
72 lo = readl(dmar + reg); \
73 hi = readl(dmar + reg + 4); \
74 (((u64) hi) << 32) + lo; })
75*/
76static inline u64 dmar_readq(void __iomem *addr)
77{
78 u32 lo, hi;
79 lo = readl(addr);
80 hi = readl(addr + 4);
81 return (((u64) hi) << 32) + lo;
82}
83
84static inline void dmar_writeq(void __iomem *addr, u64 val)
85{
86 writel((u32)val, addr);
87 writel((u32)(val >> 32), addr + 4);
88}
89
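These two inlines replace the commented-out macros above; each 64-bit register access is deliberately split into two 32-bit MMIO operations so the code also works where readq/writeq are unavailable. Typical use against the register offsets defined above (sketch):

    /* Sketch: reading the 64-bit capability registers via the helper. */
    u64 cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
    u64 ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);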
90#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
91#define DMAR_VER_MINOR(v) ((v) & 0x0f)
92
93/*
94 * Decoding Capability Register
95 */
96#define cap_read_drain(c) (((c) >> 55) & 1)
97#define cap_write_drain(c) (((c) >> 54) & 1)
98#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
99#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
100#define cap_pgsel_inv(c) (((c) >> 39) & 1)
101
102#define cap_super_page_val(c) (((c) >> 34) & 0xf)
103#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
104 * OFFSET_STRIDE) + 21)
105
106#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
107#define cap_max_fault_reg_offset(c) \
108 (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
109
110#define cap_zlr(c) (((c) >> 22) & 1)
111#define cap_isoch(c) (((c) >> 23) & 1)
112#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
113#define cap_sagaw(c) (((c) >> 8) & 0x1f)
114#define cap_caching_mode(c) (((c) >> 7) & 1)
115#define cap_phmr(c) (((c) >> 6) & 1)
116#define cap_plmr(c) (((c) >> 5) & 1)
117#define cap_rwbf(c) (((c) >> 4) & 1)
118#define cap_afl(c) (((c) >> 3) & 1)
119#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
120/*
121 * Extended Capability Register
122 */
123
124#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
125#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
126#define ecap_max_iotlb_offset(e) \
127 (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
128#define ecap_coherent(e) ((e) & 0x1)
129
130
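These extractors turn the raw capability words into usable parameters. A worked example with a made-up register value (illustration only, no real hardware implied):

    /* cap & 0x7 == 2, so cap_ndoms(cap) == 1 << (4 + 2*2) == 256 domains. */
    u64 cap = 0x0000000000260002ULL;        /* hypothetical DMAR_CAP_REG value */
    unsigned long ndoms = cap_ndoms(cap);   /* 256 */
    int mgaw = cap_mgaw(cap);               /* bits 16-21 = 0x26 = 38, +1 = 39 bits */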
131/* IOTLB_REG */
132#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
133#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
134#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
135#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
136#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
137#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
138#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
139#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
140#define DMA_TLB_IVT (((u64)1) << 63)
141#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
142#define DMA_TLB_MAX_SIZE (0x3f)
143
144/* PMEN_REG */
145#define DMA_PMEN_EPM (((u32)1)<<31)
146#define DMA_PMEN_PRS (((u32)1)<<0)
147
148/* GCMD_REG */
149#define DMA_GCMD_TE (((u32)1) << 31)
150#define DMA_GCMD_SRTP (((u32)1) << 30)
151#define DMA_GCMD_SFL (((u32)1) << 29)
152#define DMA_GCMD_EAFL (((u32)1) << 28)
153#define DMA_GCMD_WBF (((u32)1) << 27)
154
155/* GSTS_REG */
156#define DMA_GSTS_TES (((u32)1) << 31)
157#define DMA_GSTS_RTPS (((u32)1) << 30)
158#define DMA_GSTS_FLS (((u32)1) << 29)
159#define DMA_GSTS_AFLS (((u32)1) << 28)
160#define DMA_GSTS_WBFS (((u32)1) << 27)
161
162/* CCMD_REG */
163#define DMA_CCMD_ICC (((u64)1) << 63)
164#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
165#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
166#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
167#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
168#define DMA_CCMD_MASK_NOBIT 0
169#define DMA_CCMD_MASK_1BIT 1
170#define DMA_CCMD_MASK_2BIT 2
171#define DMA_CCMD_MASK_3BIT 3
172#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
173#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
174
175/* FECTL_REG */
176#define DMA_FECTL_IM (((u32)1) << 31)
177
178/* FSTS_REG */
179#define DMA_FSTS_PPF ((u32)2)
180#define DMA_FSTS_PFO ((u32)1)
181#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
182
183/* FRCD_REG, 32 bits access */
184#define DMA_FRCD_F (((u32)1) << 31)
185#define dma_frcd_type(d) ((d >> 30) & 1)
186#define dma_frcd_fault_reason(c) (c & 0xff)
187#define dma_frcd_source_id(c) (c & 0xffff)
188#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
189
190/*
191 * 0: Present
192 * 1-11: Reserved
193 * 12-63: Context Ptr (12 - (haw-1))
194 * 64-127: Reserved
195 */
196struct root_entry {
197 u64 val;
198 u64 rsvd1;
199};
200#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
201static inline bool root_present(struct root_entry *root)
202{
203 return (root->val & 1);
204}
205static inline void set_root_present(struct root_entry *root)
206{
207 root->val |= 1;
208}
209static inline void set_root_value(struct root_entry *root, unsigned long value)
210{
211 root->val |= value & PAGE_MASK_4K;
212}
213
214struct context_entry;
215static inline struct context_entry *
216get_context_addr_from_root(struct root_entry *root)
217{
218 return (struct context_entry *)
219 (root_present(root)?phys_to_virt(
220 root->val & PAGE_MASK_4K):
221 NULL);
222}
223
224/*
225 * low 64 bits:
226 * 0: present
227 * 1: fault processing disable
228 * 2-3: translation type
229 * 12-63: address space root
230 * high 64 bits:
231 * 0-2: address width
232 * 3-6: aval
233 * 8-23: domain id
234 */
235struct context_entry {
236 u64 lo;
237 u64 hi;
238};
239#define context_present(c) ((c).lo & 1)
240#define context_fault_disable(c) (((c).lo >> 1) & 1)
241#define context_translation_type(c) (((c).lo >> 2) & 3)
242#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
243#define context_address_width(c) ((c).hi & 7)
244#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
245
246#define context_set_present(c) do {(c).lo |= 1;} while (0)
247#define context_set_fault_enable(c) \
248 do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
249#define context_set_translation_type(c, val) \
250 do { \
251 (c).lo &= (((u64)-1) << 4) | 3; \
252 (c).lo |= ((val) & 3) << 2; \
253 } while (0)
254#define CONTEXT_TT_MULTI_LEVEL 0
255#define context_set_address_root(c, val) \
256 do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
257#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
258#define context_set_domain_id(c, val) \
259 do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
260#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
261
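When a device is attached to a domain, the set_* helpers above are applied in sequence to build up the entry. A sketch of a typical composition (the domain variable is assumed):

    /* Sketch: composing a context entry with the helpers above. */
    struct context_entry ce = { 0, 0 };

    context_set_domain_id(ce, domain->id);
    context_set_address_width(ce, domain->agaw);
    context_set_address_root(ce, virt_to_phys(domain->pgd));
    context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
    context_set_fault_enable(ce);
    context_set_present(ce);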
262/*
263 * 0: readable
264 * 1: writable
265 * 2-6: reserved
266 * 7: super page
267 * 8-11: available
268 * 12-63: Host physical address
269 */
270struct dma_pte {
271 u64 val;
272};
273#define dma_clear_pte(p) do {(p).val = 0;} while (0)
274
275#define DMA_PTE_READ (1)
276#define DMA_PTE_WRITE (2)
277
278#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
279#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
280#define dma_set_pte_prot(p, prot) \
281 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
282#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
283#define dma_set_pte_addr(p, addr) do {\
284 (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
285#define dma_pte_present(p) (((p).val & 3) != 0)
286
287struct intel_iommu;
288
289struct dmar_domain {
290 int id; /* domain id */
291 struct intel_iommu *iommu; /* back pointer to owning iommu */
292
293 struct list_head devices; /* all devices' list */
294 struct iova_domain iovad; /* iova's that belong to this domain */
295
296 struct dma_pte *pgd; /* virtual address */
297 spinlock_t mapping_lock; /* page table lock */
298 int gaw; /* max guest address width */
299
300 /* adjusted guest address width, 0 is level 2 30-bit */
301 int agaw;
302
303#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
304 int flags;
305};
306
307/* PCI domain-device relationship */
308struct device_domain_info {
309 struct list_head link; /* link to domain siblings */
310 struct list_head global; /* link to global list */
311 u8 bus; /* PCI bus number */
312 u8 devfn; /* PCI devfn number */
313 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
314 struct dmar_domain *domain; /* pointer to domain */
315};
316
317extern int init_dmars(void);
318
319struct intel_iommu {
320 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
321 u64 cap;
322 u64 ecap;
323 unsigned long *domain_ids; /* bitmap of domains */
324 struct dmar_domain **domains; /* ptr to domains */
325 int seg;
326 u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
327 spinlock_t lock; /* protect context, domain ids */
328 spinlock_t register_lock; /* protect register handling */
329 struct root_entry *root_entry; /* virtual address */
330
331 unsigned int irq;
332 unsigned char name[7]; /* Device Name */
333 struct msi_msg saved_msg;
334 struct sys_device sysdev;
335};
336
337#ifndef CONFIG_DMAR_GFX_WA
338static inline void iommu_prepare_gfx_mapping(void)
339{
340 return;
341}
342#endif /* !CONFIG_DMAR_GFX_WA */
343
344#endif
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
new file mode 100644
index 00000000000..2de5a3238c9
--- /dev/null
+++ b/drivers/pci/intr_remapping.c
@@ -0,0 +1,512 @@
1#include <linux/interrupt.h>
2#include <linux/dmar.h>
3#include <linux/spinlock.h>
4#include <linux/jiffies.h>
5#include <linux/pci.h>
6#include <linux/irq.h>
7#include <asm/io_apic.h>
8#include <linux/intel-iommu.h>
9#include "intr_remapping.h"
10
11static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
12static int ir_ioapic_num;
13int intr_remapping_enabled;
14
15struct irq_2_iommu {
16 struct intel_iommu *iommu;
17 u16 irte_index;
18 u16 sub_handle;
19 u8 irte_mask;
20};
21
22static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
23
24static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
25{
26 return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
27}
28
29static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
30{
31 return irq_2_iommu(irq);
32}
33
34static DEFINE_SPINLOCK(irq_2_ir_lock);
35
36static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
37{
38 struct irq_2_iommu *irq_iommu;
39
40 irq_iommu = irq_2_iommu(irq);
41
42 if (!irq_iommu)
43 return NULL;
44
45 if (!irq_iommu->iommu)
46 return NULL;
47
48 return irq_iommu;
49}
50
51int irq_remapped(int irq)
52{
53 return valid_irq_2_iommu(irq) != NULL;
54}
55
56int get_irte(int irq, struct irte *entry)
57{
58 int index;
59 struct irq_2_iommu *irq_iommu;
60
61 if (!entry)
62 return -1;
63
64 spin_lock(&irq_2_ir_lock);
65 irq_iommu = valid_irq_2_iommu(irq);
66 if (!irq_iommu) {
67 spin_unlock(&irq_2_ir_lock);
68 return -1;
69 }
70
71 index = irq_iommu->irte_index + irq_iommu->sub_handle;
72 *entry = *(irq_iommu->iommu->ir_table->base + index);
73
74 spin_unlock(&irq_2_ir_lock);
75 return 0;
76}
77
78int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
79{
80 struct ir_table *table = iommu->ir_table;
81 struct irq_2_iommu *irq_iommu;
82 u16 index, start_index;
83 unsigned int mask = 0;
84 int i;
85
86 if (!count)
87 return -1;
88
89 /* protect irq_2_iommu_alloc later */
90 if (irq >= nr_irqs)
91 return -1;
92
93 /*
94 * start the IRTE search from index 0.
95 */
96 index = start_index = 0;
97
98 if (count > 1) {
99 count = __roundup_pow_of_two(count);
100 mask = ilog2(count);
101 }
102
103 if (mask > ecap_max_handle_mask(iommu->ecap)) {
104 printk(KERN_ERR
105 "Requested mask %x exceeds the max invalidation handle"
106 " mask value %Lx\n", mask,
107 ecap_max_handle_mask(iommu->ecap));
108 return -1;
109 }
110
111 spin_lock(&irq_2_ir_lock);
112 do {
113 for (i = index; i < index + count; i++)
114 if (table->base[i].present)
115 break;
116 /* empty index found */
117 if (i == index + count)
118 break;
119
120 index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
121
122 if (index == start_index) {
123 spin_unlock(&irq_2_ir_lock);
124 printk(KERN_ERR "can't allocate an IRTE\n");
125 return -1;
126 }
127 } while (1);
128
129 for (i = index; i < index + count; i++)
130 table->base[i].present = 1;
131
132 irq_iommu = irq_2_iommu_alloc(irq);
133 irq_iommu->iommu = iommu;
134 irq_iommu->irte_index = index;
135 irq_iommu->sub_handle = 0;
136 irq_iommu->irte_mask = mask;
137
138 spin_unlock(&irq_2_ir_lock);
139
140 return index;
141}
142
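For a multi-vector MSI, the caller reserves one power-of-two block and then addresses the individual entries through sub-handles. A hypothetical sketch using the helpers defined in this file:

    /* Sketch: reserving a 4-entry IRTE block for a 4-vector MSI. */
    int i, index;

    index = alloc_irte(iommu, irq, 4);      /* rounds to 2^n and records the mask */
    if (index < 0)
            return -ENOSPC;

    for (i = 1; i < 4; i++)                 /* remaining vectors share the block */
            set_irte_irq(irq + i, iommu, index, i);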
143static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
144{
145 struct qi_desc desc;
146
147 desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
148 | QI_IEC_SELECTIVE;
149 desc.high = 0;
150
151 qi_submit_sync(&desc, iommu);
152}
153
154int map_irq_to_irte_handle(int irq, u16 *sub_handle)
155{
156 int index;
157 struct irq_2_iommu *irq_iommu;
158
159 spin_lock(&irq_2_ir_lock);
160 irq_iommu = valid_irq_2_iommu(irq);
161 if (!irq_iommu) {
162 spin_unlock(&irq_2_ir_lock);
163 return -1;
164 }
165
166 *sub_handle = irq_iommu->sub_handle;
167 index = irq_iommu->irte_index;
168 spin_unlock(&irq_2_ir_lock);
169 return index;
170}
171
172int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
173{
174 struct irq_2_iommu *irq_iommu;
175
176 spin_lock(&irq_2_ir_lock);
177
178 irq_iommu = irq_2_iommu_alloc(irq);
179
180 irq_iommu->iommu = iommu;
181 irq_iommu->irte_index = index;
182 irq_iommu->sub_handle = subhandle;
183 irq_iommu->irte_mask = 0;
184
185 spin_unlock(&irq_2_ir_lock);
186
187 return 0;
188}
189
190int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
191{
192 struct irq_2_iommu *irq_iommu;
193
194 spin_lock(&irq_2_ir_lock);
195 irq_iommu = valid_irq_2_iommu(irq);
196 if (!irq_iommu) {
197 spin_unlock(&irq_2_ir_lock);
198 return -1;
199 }
200
201 irq_iommu->iommu = NULL;
202 irq_iommu->irte_index = 0;
203 irq_iommu->sub_handle = 0;
204 irq_iommu->irte_mask = 0;
205
206 spin_unlock(&irq_2_ir_lock);
207
208 return 0;
209}
210
211int modify_irte(int irq, struct irte *irte_modified)
212{
213 int index;
214 struct irte *irte;
215 struct intel_iommu *iommu;
216 struct irq_2_iommu *irq_iommu;
217
218 spin_lock(&irq_2_ir_lock);
219 irq_iommu = valid_irq_2_iommu(irq);
220 if (!irq_iommu) {
221 spin_unlock(&irq_2_ir_lock);
222 return -1;
223 }
224
225 iommu = irq_iommu->iommu;
226
227 index = irq_iommu->irte_index + irq_iommu->sub_handle;
228 irte = &iommu->ir_table->base[index];
229
230 set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
231 __iommu_flush_cache(iommu, irte, sizeof(*irte));
232
233 qi_flush_iec(iommu, index, 0);
234
235 spin_unlock(&irq_2_ir_lock);
236 return 0;
237}
238
239int flush_irte(int irq)
240{
241 int index;
242 struct intel_iommu *iommu;
243 struct irq_2_iommu *irq_iommu;
244
245 spin_lock(&irq_2_ir_lock);
246 irq_iommu = valid_irq_2_iommu(irq);
247 if (!irq_iommu) {
248 spin_unlock(&irq_2_ir_lock);
249 return -1;
250 }
251
252 iommu = irq_iommu->iommu;
253
254 index = irq_iommu->irte_index + irq_iommu->sub_handle;
255
256 qi_flush_iec(iommu, index, irq_iommu->irte_mask);
257 spin_unlock(&irq_2_ir_lock);
258
259 return 0;
260}
261
262struct intel_iommu *map_ioapic_to_ir(int apic)
263{
264 int i;
265
266 for (i = 0; i < MAX_IO_APICS; i++)
267 if (ir_ioapic[i].id == apic)
268 return ir_ioapic[i].iommu;
269 return NULL;
270}
271
272struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
273{
274 struct dmar_drhd_unit *drhd;
275
276 drhd = dmar_find_matched_drhd_unit(dev);
277 if (!drhd)
278 return NULL;
279
280 return drhd->iommu;
281}
282
283int free_irte(int irq)
284{
285 int index, i;
286 struct irte *irte;
287 struct intel_iommu *iommu;
288 struct irq_2_iommu *irq_iommu;
289
290 spin_lock(&irq_2_ir_lock);
291 irq_iommu = valid_irq_2_iommu(irq);
292 if (!irq_iommu) {
293 spin_unlock(&irq_2_ir_lock);
294 return -1;
295 }
296
297 iommu = irq_iommu->iommu;
298
299 index = irq_iommu->irte_index + irq_iommu->sub_handle;
300 irte = &iommu->ir_table->base[index];
301
302 if (!irq_iommu->sub_handle) {
303 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
304 set_64bit((unsigned long *)irte, 0);
305 qi_flush_iec(iommu, index, irq_iommu->irte_mask);
306 }
307
308 irq_iommu->iommu = NULL;
309 irq_iommu->irte_index = 0;
310 irq_iommu->sub_handle = 0;
311 irq_iommu->irte_mask = 0;
312
313 spin_unlock(&irq_2_ir_lock);
314
315 return 0;
316}
317
318static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
319{
320 u64 addr;
321 u32 cmd, sts;
322 unsigned long flags;
323
324 addr = virt_to_phys((void *)iommu->ir_table->base);
325
326 spin_lock_irqsave(&iommu->register_lock, flags);
327
328 dmar_writeq(iommu->reg + DMAR_IRTA_REG,
329 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
330
331 /* Set interrupt-remapping table pointer */
332 cmd = iommu->gcmd | DMA_GCMD_SIRTP;
333 writel(cmd, iommu->reg + DMAR_GCMD_REG);
334
335 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
336 readl, (sts & DMA_GSTS_IRTPS), sts);
337 spin_unlock_irqrestore(&iommu->register_lock, flags);
338
339 /*
340 * global invalidation of interrupt entry cache before enabling
341 * interrupt-remapping.
342 */
343 qi_global_iec(iommu);
344
345 spin_lock_irqsave(&iommu->register_lock, flags);
346
347 /* Enable interrupt-remapping */
348 cmd = iommu->gcmd | DMA_GCMD_IRE;
349 iommu->gcmd |= DMA_GCMD_IRE;
350 writel(cmd, iommu->reg + DMAR_GCMD_REG);
351
352 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
353 readl, (sts & DMA_GSTS_IRES), sts);
354
355 spin_unlock_irqrestore(&iommu->register_lock, flags);
356}
357
358
359static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
360{
361 struct ir_table *ir_table;
362 struct page *pages;
363
364 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
365 GFP_KERNEL);
366
367 if (!iommu->ir_table)
368 return -ENOMEM;
369
370 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
371
372 if (!pages) {
373 printk(KERN_ERR "failed to allocate pages of order %d\n",
374 INTR_REMAP_PAGE_ORDER);
375 kfree(iommu->ir_table);
376 return -ENOMEM;
377 }
378
379 ir_table->base = page_address(pages);
380
381 iommu_set_intr_remapping(iommu, mode);
382 return 0;
383}
384
385int __init enable_intr_remapping(int eim)
386{
387 struct dmar_drhd_unit *drhd;
388 int setup = 0;
389
390 /*
391 * check for the Interrupt-remapping support
392 */
393 for_each_drhd_unit(drhd) {
394 struct intel_iommu *iommu = drhd->iommu;
395
396 if (!ecap_ir_support(iommu->ecap))
397 continue;
398
399 if (eim && !ecap_eim_support(iommu->ecap)) {
400 printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
401 " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
402 return -1;
403 }
404 }
405
406 /*
407 * Enable queued invalidation for all the DRHD's.
408 */
409 for_each_drhd_unit(drhd) {
410 int ret;
411 struct intel_iommu *iommu = drhd->iommu;
412 ret = dmar_enable_qi(iommu);
413
414 if (ret) {
415 printk(KERN_ERR "DRHD %Lx: failed to enable queued"
416 " invalidation, ecap %Lx, ret %d\n",
417 drhd->reg_base_addr, iommu->ecap, ret);
418 return -1;
419 }
420 }
421
422 /*
423 * Setup Interrupt-remapping for all the DRHD's now.
424 */
425 for_each_drhd_unit(drhd) {
426 struct intel_iommu *iommu = drhd->iommu;
427
428 if (!ecap_ir_support(iommu->ecap))
429 continue;
430
431 if (setup_intr_remapping(iommu, eim))
432 goto error;
433
434 setup = 1;
435 }
436
437 if (!setup)
438 goto error;
439
440 intr_remapping_enabled = 1;
441
442 return 0;
443
444error:
445 /*
446 * handle error condition gracefully here!
447 */
448 return -1;
449}
450
451static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
452 struct intel_iommu *iommu)
453{
454 struct acpi_dmar_hardware_unit *drhd;
455 struct acpi_dmar_device_scope *scope;
456 void *start, *end;
457
458 drhd = (struct acpi_dmar_hardware_unit *)header;
459
460 start = (void *)(drhd + 1);
461 end = ((void *)drhd) + header->length;
462
463 while (start < end) {
464 scope = start;
465 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
466 if (ir_ioapic_num == MAX_IO_APICS) {
467 printk(KERN_WARNING "Exceeded Max IO APICS\n");
468 return -1;
469 }
470
471 printk(KERN_INFO "IOAPIC id %d under DRHD base"
472 " 0x%Lx\n", scope->enumeration_id,
473 drhd->address);
474
475 ir_ioapic[ir_ioapic_num].iommu = iommu;
476 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
477 ir_ioapic_num++;
478 }
479 start += scope->length;
480 }
481
482 return 0;
483}
484
485/*
486 * Finds the association between IOAPICs and their Interrupt-remapping
487 * hardware units.
488 */
489int __init parse_ioapics_under_ir(void)
490{
491 struct dmar_drhd_unit *drhd;
492 int ir_supported = 0;
493
494 for_each_drhd_unit(drhd) {
495 struct intel_iommu *iommu = drhd->iommu;
496
497 if (ecap_ir_support(iommu->ecap)) {
498 if (ir_parse_ioapic_scope(drhd->hdr, iommu))
499 return -1;
500
501 ir_supported = 1;
502 }
503 }
504
505 if (ir_supported && ir_ioapic_num != nr_ioapics) {
506 printk(KERN_WARNING
507 "Not all IO-APIC's listed under remapping hardware\n");
508 return -1;
509 }
510
511 return ir_supported;
512}
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
new file mode 100644
index 00000000000..ca48f0df8ac
--- /dev/null
+++ b/drivers/pci/intr_remapping.h
@@ -0,0 +1,8 @@
1#include <linux/intel-iommu.h>
2
3struct ioapic_scope {
4 struct intel_iommu *iommu;
5 unsigned int id;
6};
7
8#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 3ef4ac06431..2287116e982 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -7,7 +7,7 @@
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 */ 8 */
9 9
10#include "iova.h" 10#include <linux/iova.h>
11 11
12void 12void
13init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) 13init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
deleted file mode 100644
index 228f6c94b69..00000000000
--- a/drivers/pci/iova.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This file is released under the GPLv2.
5 *
6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 *
9 */
10
11#ifndef _IOVA_H_
12#define _IOVA_H_
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/rbtree.h>
17#include <linux/dma-mapping.h>
18
19/* IO virtual address start page frame number */
20#define IOVA_START_PFN (1)
21
22/* iova structure */
23struct iova {
24 struct rb_node node;
25 unsigned long pfn_hi; /* IOMMU dish out addr hi */
26 unsigned long pfn_lo; /* IOMMU dish out addr lo */
27};
28
29/* holds all the iova translations for a domain */
30struct iova_domain {
31 spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
32 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
33 struct rb_root rbroot; /* iova domain rbtree root */
34 struct rb_node *cached32_node; /* Save last allocated node */
35 unsigned long dma_32bit_pfn;
36};
37
38struct iova *alloc_iova_mem(void);
39void free_iova_mem(struct iova *iova);
40void free_iova(struct iova_domain *iovad, unsigned long pfn);
41void __free_iova(struct iova_domain *iovad, struct iova *iova);
42struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
43 unsigned long limit_pfn,
44 bool size_aligned);
45struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
46 unsigned long pfn_hi);
47void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
48void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
49struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
50void put_iova_domain(struct iova_domain *iovad);
51
52#endif
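The declarations move verbatim to <linux/iova.h> (iova.c above now includes it), so the allocator keeps the same interface. A minimal sketch of its use, with nrpages and dma_mask as assumed inputs:

    /* Sketch: carving a size-aligned IOVA range out of a domain. */
    struct iova_domain iovad;
    struct iova *iova;

    init_iova_domain(&iovad, DMA_32BIT_PFN);
    iova = alloc_iova(&iovad, nrpages, IOVA_PFN(dma_mask), true);
    if (!iova)
            return -ENOMEM;
    /* DMA address = iova->pfn_lo << PAGE_SHIFT_4K; release with: */
    __free_iova(&iovad, iova);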
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4a10b5624f7..d2812013fd2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -378,23 +378,21 @@ static int msi_capability_init(struct pci_dev *dev)
378 entry->msi_attrib.masked = 1; 378 entry->msi_attrib.masked = 1;
379 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 379 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
380 entry->msi_attrib.pos = pos; 380 entry->msi_attrib.pos = pos;
381 if (is_mask_bit_support(control)) { 381 if (entry->msi_attrib.maskbit) {
382 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 382 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
383 is_64bit_address(control)); 383 entry->msi_attrib.is_64);
384 } 384 }
385 entry->dev = dev; 385 entry->dev = dev;
386 if (entry->msi_attrib.maskbit) { 386 if (entry->msi_attrib.maskbit) {
387 unsigned int maskbits, temp; 387 unsigned int maskbits, temp;
388 /* All MSIs are unmasked by default, Mask them all */ 388 /* All MSIs are unmasked by default, Mask them all */
389 pci_read_config_dword(dev, 389 pci_read_config_dword(dev,
390 msi_mask_bits_reg(pos, is_64bit_address(control)), 390 msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
391 &maskbits); 391 &maskbits);
392 temp = (1 << multi_msi_capable(control)); 392 temp = (1 << multi_msi_capable(control));
393 temp = ((temp - 1) & ~temp); 393 temp = ((temp - 1) & ~temp);
394 maskbits |= temp; 394 maskbits |= temp;
395 pci_write_config_dword(dev, 395 pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits);
396 msi_mask_bits_reg(pos, is_64bit_address(control)),
397 maskbits);
398 entry->msi_attrib.maskbits_mask = temp; 396 entry->msi_attrib.maskbits_mask = temp;
399 } 397 }
400 list_add_tail(&entry->list, &dev->msi_list); 398 list_add_tail(&entry->list, &dev->msi_list);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a13f5348611..b4cdd690ae7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -43,18 +43,32 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
43{ 43{
44 struct pci_dynid *dynid; 44 struct pci_dynid *dynid;
45 struct pci_driver *pdrv = to_pci_driver(driver); 45 struct pci_driver *pdrv = to_pci_driver(driver);
46 const struct pci_device_id *ids = pdrv->id_table;
46 __u32 vendor, device, subvendor=PCI_ANY_ID, 47 __u32 vendor, device, subvendor=PCI_ANY_ID,
47 subdevice=PCI_ANY_ID, class=0, class_mask=0; 48 subdevice=PCI_ANY_ID, class=0, class_mask=0;
48 unsigned long driver_data=0; 49 unsigned long driver_data=0;
49 int fields=0; 50 int fields=0;
50 int retval = 0; 51 int retval;
51 52
52 fields = sscanf(buf, "%x %x %x %x %x %x %lux", 53 fields = sscanf(buf, "%x %x %x %x %x %x %lx",
53 &vendor, &device, &subvendor, &subdevice, 54 &vendor, &device, &subvendor, &subdevice,
54 &class, &class_mask, &driver_data); 55 &class, &class_mask, &driver_data);
55 if (fields < 2) 56 if (fields < 2)
56 return -EINVAL; 57 return -EINVAL;
57 58
59 /* Only accept driver_data values that match an existing id_table
60 entry */
61 retval = -EINVAL;
62 while (ids->vendor || ids->subvendor || ids->class_mask) {
63 if (driver_data == ids->driver_data) {
64 retval = 0;
65 break;
66 }
67 ids++;
68 }
69 if (retval) /* No match */
70 return retval;
71
58 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 72 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
59 if (!dynid) 73 if (!dynid)
60 return -ENOMEM; 74 return -ENOMEM;
@@ -65,8 +79,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
65 dynid->id.subdevice = subdevice; 79 dynid->id.subdevice = subdevice;
66 dynid->id.class = class; 80 dynid->id.class = class;
67 dynid->id.class_mask = class_mask; 81 dynid->id.class_mask = class_mask;
68 dynid->id.driver_data = pdrv->dynids.use_driver_data ? 82 dynid->id.driver_data = driver_data;
69 driver_data : 0UL;
70 83
71 spin_lock(&pdrv->dynids.lock); 84 spin_lock(&pdrv->dynids.lock);
72 list_add_tail(&dynid->node, &pdrv->dynids.list); 85 list_add_tail(&dynid->node, &pdrv->dynids.list);
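From user space, the effect of this hunk is that the optional seventh new_id field is now parsed with %lx and must match a driver_data value the driver's id_table already carries. A hypothetical user-space exercise of the interface (driver name and IDs are illustrative, not from this patch):

    /* Illustrative only: path and IDs are examples. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/bus/pci/drivers/foo/new_id", "w");

            if (!f)
                    return 1;
            /* vendor device subvendor subdevice class class_mask driver_data */
            fprintf(f, "8086 10d3 ffffffff ffffffff 0 0 1\n");
            return fclose(f) ? 1 : 0;
    }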
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 77baff022f7..110022d7868 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -423,7 +423,7 @@ pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr,
423 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific 423 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
424 * callback routine (pci_legacy_read). 424 * callback routine (pci_legacy_read).
425 */ 425 */
426ssize_t 426static ssize_t
427pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, 427pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
428 char *buf, loff_t off, size_t count) 428 char *buf, loff_t off, size_t count)
429{ 429{
@@ -448,7 +448,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
448 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific 448 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
449 * callback routine (pci_legacy_write). 449 * callback routine (pci_legacy_write).
450 */ 450 */
451ssize_t 451static ssize_t
452pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, 452pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
453 char *buf, loff_t off, size_t count) 453 char *buf, loff_t off, size_t count)
454{ 454{
@@ -468,11 +468,11 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
468 * @attr: struct bin_attribute for this file 468 * @attr: struct bin_attribute for this file
469 * @vma: struct vm_area_struct passed to mmap 469 * @vma: struct vm_area_struct passed to mmap
470 * 470 *
471 * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap 471 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
472 * legacy memory space (first meg of bus space) into application virtual 472 * legacy memory space (first meg of bus space) into application virtual
473 * memory space. 473 * memory space.
474 */ 474 */
475int 475static int
476pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, 476pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
477 struct vm_area_struct *vma) 477 struct vm_area_struct *vma)
478{ 478{
@@ -480,7 +480,90 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
480 struct device, 480 struct device,
481 kobj)); 481 kobj));
482 482
483 return pci_mmap_legacy_page_range(bus, vma); 483 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
484}
485
486/**
487 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
488 * @kobj: kobject corresponding to device to be mapped
489 * @attr: struct bin_attribute for this file
490 * @vma: struct vm_area_struct passed to mmap
491 *
492 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
493 * legacy IO space (first meg of bus space) into application virtual
494 * memory space. Returns -ENOSYS if the operation isn't supported.
495 */
496static int
497pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
498 struct vm_area_struct *vma)
499{
500 struct pci_bus *bus = to_pci_bus(container_of(kobj,
501 struct device,
502 kobj));
503
504 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
505}
506
507/**
508 * pci_create_legacy_files - create legacy I/O port and memory files
509 * @b: bus to create files under
510 *
511 * Some platforms allow access to legacy I/O port and ISA memory space on
512 * a per-bus basis. This routine creates the files and ties them into
513 * their associated read, write and mmap files from pci-sysfs.c
514 *
515 * On error unwind, but don't propagate the error to the caller
516 * as it is ok to set up the PCI bus without these files.
517 */
518void pci_create_legacy_files(struct pci_bus *b)
519{
520 int error;
521
522 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
523 GFP_ATOMIC);
524 if (!b->legacy_io)
525 goto kzalloc_err;
526
527 b->legacy_io->attr.name = "legacy_io";
528 b->legacy_io->size = 0xffff;
529 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
530 b->legacy_io->read = pci_read_legacy_io;
531 b->legacy_io->write = pci_write_legacy_io;
532 b->legacy_io->mmap = pci_mmap_legacy_io;
533 error = device_create_bin_file(&b->dev, b->legacy_io);
534 if (error)
535 goto legacy_io_err;
536
537 /* Allocated above after the legacy_io struct */
538 b->legacy_mem = b->legacy_io + 1;
539 b->legacy_mem->attr.name = "legacy_mem";
540 b->legacy_mem->size = 1024*1024;
541 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
542 b->legacy_mem->mmap = pci_mmap_legacy_mem;
543 error = device_create_bin_file(&b->dev, b->legacy_mem);
544 if (error)
545 goto legacy_mem_err;
546
547 return;
548
549legacy_mem_err:
550 device_remove_bin_file(&b->dev, b->legacy_io);
551legacy_io_err:
552 kfree(b->legacy_io);
553 b->legacy_io = NULL;
554kzalloc_err:
555 printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
556 "and ISA memory resources to sysfs\n");
557 return;
558}
559
560void pci_remove_legacy_files(struct pci_bus *b)
561{
562 if (b->legacy_io) {
563 device_remove_bin_file(&b->dev, b->legacy_io);
564 device_remove_bin_file(&b->dev, b->legacy_mem);
565 kfree(b->legacy_io); /* both are allocated here */
566 }
484} 567}
485#endif /* HAVE_PCI_LEGACY */ 568#endif /* HAVE_PCI_LEGACY */
486 569
@@ -715,7 +798,7 @@ static struct bin_attribute pci_config_attr = {
715 .name = "config", 798 .name = "config",
716 .mode = S_IRUGO | S_IWUSR, 799 .mode = S_IRUGO | S_IWUSR,
717 }, 800 },
718 .size = 256, 801 .size = PCI_CFG_SPACE_SIZE,
719 .read = pci_read_config, 802 .read = pci_read_config,
720 .write = pci_write_config, 803 .write = pci_write_config,
721}; 804};
@@ -725,7 +808,7 @@ static struct bin_attribute pcie_config_attr = {
725 .name = "config", 808 .name = "config",
726 .mode = S_IRUGO | S_IWUSR, 809 .mode = S_IRUGO | S_IWUSR,
727 }, 810 },
728 .size = 4096, 811 .size = PCI_CFG_SPACE_EXP_SIZE,
729 .read = pci_read_config, 812 .read = pci_read_config,
730 .write = pci_write_config, 813 .write = pci_write_config,
731}; 814};
@@ -735,86 +818,103 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
735 return 0; 818 return 0;
736} 819}
737 820
821static int pci_create_capabilities_sysfs(struct pci_dev *dev)
822{
823 int retval;
824 struct bin_attribute *attr;
825
826 /* If the device has VPD, try to expose it in sysfs. */
827 if (dev->vpd) {
828 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
829 if (!attr)
830 return -ENOMEM;
831
832 attr->size = dev->vpd->len;
833 attr->attr.name = "vpd";
834 attr->attr.mode = S_IRUSR | S_IWUSR;
835 attr->read = pci_read_vpd;
836 attr->write = pci_write_vpd;
837 retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
838 if (retval) {
839 kfree(dev->vpd->attr);
840 return retval;
841 }
842 dev->vpd->attr = attr;
843 }
844
845 /* Active State Power Management */
846 pcie_aspm_create_sysfs_dev_files(dev);
847
848 return 0;
849}
850
738int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) 851int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
739{ 852{
740 struct bin_attribute *attr = NULL;
741 int retval; 853 int retval;
854 int rom_size = 0;
855 struct bin_attribute *attr;
742 856
743 if (!sysfs_initialized) 857 if (!sysfs_initialized)
744 return -EACCES; 858 return -EACCES;
745 859
746 if (pdev->cfg_size < 4096) 860 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
747 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); 861 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
748 else 862 else
749 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); 863 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
750 if (retval) 864 if (retval)
751 goto err; 865 goto err;
752 866
753 /* If the device has VPD, try to expose it in sysfs. */
754 if (pdev->vpd) {
755 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
756 if (attr) {
757 pdev->vpd->attr = attr;
758 attr->size = pdev->vpd->len;
759 attr->attr.name = "vpd";
760 attr->attr.mode = S_IRUSR | S_IWUSR;
761 attr->read = pci_read_vpd;
762 attr->write = pci_write_vpd;
763 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
764 if (retval)
765 goto err_vpd;
766 } else {
767 retval = -ENOMEM;
768 goto err_config_file;
769 }
770 }
771
772 retval = pci_create_resource_files(pdev); 867 retval = pci_create_resource_files(pdev);
773 if (retval) 868 if (retval)
774 goto err_vpd_file; 869 goto err_config_file;
870
871 if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
872 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
873 else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
874 rom_size = 0x20000;
775 875
776 /* If the device has a ROM, try to expose it in sysfs. */ 876 /* If the device has a ROM, try to expose it in sysfs. */
777 if (pci_resource_len(pdev, PCI_ROM_RESOURCE) || 877 if (rom_size) {
778 (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)) {
779 attr = kzalloc(sizeof(*attr), GFP_ATOMIC); 878 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
780 if (attr) { 879 if (!attr) {
781 pdev->rom_attr = attr;
782 attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
783 attr->attr.name = "rom";
784 attr->attr.mode = S_IRUSR;
785 attr->read = pci_read_rom;
786 attr->write = pci_write_rom;
787 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
788 if (retval)
789 goto err_rom;
790 } else {
791 retval = -ENOMEM; 880 retval = -ENOMEM;
792 goto err_resource_files; 881 goto err_resource_files;
793 } 882 }
883 attr->size = rom_size;
884 attr->attr.name = "rom";
885 attr->attr.mode = S_IRUSR;
886 attr->read = pci_read_rom;
887 attr->write = pci_write_rom;
888 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
889 if (retval) {
890 kfree(attr);
891 goto err_resource_files;
892 }
893 pdev->rom_attr = attr;
794 } 894 }
895
795 /* add platform-specific attributes */ 896 /* add platform-specific attributes */
796 if (pcibios_add_platform_entries(pdev)) 897 retval = pcibios_add_platform_entries(pdev);
898 if (retval)
797 goto err_rom_file; 899 goto err_rom_file;
798 900
799 pcie_aspm_create_sysfs_dev_files(pdev); 901 /* add sysfs entries for various capabilities */
902 retval = pci_create_capabilities_sysfs(pdev);
903 if (retval)
904 goto err_rom_file;
800 905
801 return 0; 906 return 0;
802 907
803err_rom_file: 908err_rom_file:
804 if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) 909 if (rom_size) {
805 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 910 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
806err_rom: 911 kfree(pdev->rom_attr);
807 kfree(pdev->rom_attr); 912 pdev->rom_attr = NULL;
913 }
808err_resource_files: 914err_resource_files:
809 pci_remove_resource_files(pdev); 915 pci_remove_resource_files(pdev);
810err_vpd_file:
811 if (pdev->vpd) {
812 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr);
813err_vpd:
814 kfree(pdev->vpd->attr);
815 }
816err_config_file: 916err_config_file:
817 if (pdev->cfg_size < 4096) 917 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
818 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); 918 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
819 else 919 else
820 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); 920 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
@@ -822,6 +922,16 @@ err:
822 return retval; 922 return retval;
823} 923}
824 924
925static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
926{
927 if (dev->vpd && dev->vpd->attr) {
928 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
929 kfree(dev->vpd->attr);
930 }
931
932 pcie_aspm_remove_sysfs_dev_files(dev);
933}
934
825/** 935/**
826 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files 936 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
827 * @pdev: device whose entries we should free 937 * @pdev: device whose entries we should free
@@ -830,27 +940,28 @@ err:
830 */ 940 */
831void pci_remove_sysfs_dev_files(struct pci_dev *pdev) 941void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
832{ 942{
943 int rom_size = 0;
944
833 if (!sysfs_initialized) 945 if (!sysfs_initialized)
834 return; 946 return;
835 947
836 pcie_aspm_remove_sysfs_dev_files(pdev); 948 pci_remove_capabilities_sysfs(pdev);
837 949
838 if (pdev->vpd) { 950 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
839 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr);
840 kfree(pdev->vpd->attr);
841 }
842 if (pdev->cfg_size < 4096)
843 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); 951 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
844 else 952 else
845 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); 953 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
846 954
847 pci_remove_resource_files(pdev); 955 pci_remove_resource_files(pdev);
848 956
849 if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) { 957 if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
850 if (pdev->rom_attr) { 958 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
851 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 959 else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
852 kfree(pdev->rom_attr); 960 rom_size = 0x20000;
853 } 961
962 if (rom_size && pdev->rom_attr) {
963 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
964 kfree(pdev->rom_attr);
854 } 965 }
855} 966}
856 967
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9884bba22d..4db261e13e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -213,10 +213,13 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
213int pci_find_ext_capability(struct pci_dev *dev, int cap) 213int pci_find_ext_capability(struct pci_dev *dev, int cap)
214{ 214{
215 u32 header; 215 u32 header;
216 int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ 216 int ttl;
217 int pos = 0x100; 217 int pos = PCI_CFG_SPACE_SIZE;
218 218
219 if (dev->cfg_size <= 256) 219 /* minimum 8 bytes per capability */
220 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
221
222 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
220 return 0; 223 return 0;
221 224
222 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 225 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
@@ -234,7 +237,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
234 return pos; 237 return pos;
235 238
236 pos = PCI_EXT_CAP_NEXT(header); 239 pos = PCI_EXT_CAP_NEXT(header);
237 if (pos < 0x100) 240 if (pos < PCI_CFG_SPACE_SIZE)
238 break; 241 break;
239 242
240 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 243 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
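The new ttl expression encodes the same bound the old literal did: (4096 - 256) / 8 = 480 extended capabilities at most. Call sites are unchanged; for reference, a typical lookup (sketch):

    /* Sketch: walking to the AER extended capability from a driver. */
    u32 status;
    int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

    if (pos)
            pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);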
@@ -1127,6 +1130,27 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
1127} 1130}
1128 1131
1129/** 1132/**
1133 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1134 * @dev: PCI device to prepare
1135 * @enable: True to enable wake-up event generation; false to disable
1136 *
1137 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1138 * and this function allows them to set that up cleanly - pci_enable_wake()
1139 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1140 * ordering constraints.
1141 *
1142 * This function only returns an error code if the device is not capable of
1143 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1144 * enable wake-up power for it.
1145 */
1146int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1147{
1148 return pci_pme_capable(dev, PCI_D3cold) ?
1149 pci_enable_wake(dev, PCI_D3cold, enable) :
1150 pci_enable_wake(dev, PCI_D3hot, enable);
1151}
1152
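A driver's suspend path is the intended caller. A hedged sketch of the usual pattern (the foo_ name is a placeholder):

    /* Sketch: arming wake-up in a driver's suspend handler. */
    static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            pci_save_state(pdev);
            pci_wake_from_d3(pdev, true);   /* PME# from D3hot, or D3cold if capable */
            pci_set_power_state(pdev, pci_choose_state(pdev, state));
            return 0;
    }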
1153/**
1130 * pci_target_state - find an appropriate low power state for a given PCI dev 1154 * pci_target_state - find an appropriate low power state for a given PCI dev
1131 * @dev: PCI device 1155 * @dev: PCI device
1132 * 1156 *
@@ -1242,25 +1266,25 @@ void pci_pm_init(struct pci_dev *dev)
1242 dev->d1_support = false; 1266 dev->d1_support = false;
1243 dev->d2_support = false; 1267 dev->d2_support = false;
1244 if (!pci_no_d1d2(dev)) { 1268 if (!pci_no_d1d2(dev)) {
1245 if (pmc & PCI_PM_CAP_D1) { 1269 if (pmc & PCI_PM_CAP_D1)
1246 dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n");
1247 dev->d1_support = true; 1270 dev->d1_support = true;
1248 } 1271 if (pmc & PCI_PM_CAP_D2)
1249 if (pmc & PCI_PM_CAP_D2) {
1250 dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n");
1251 dev->d2_support = true; 1272 dev->d2_support = true;
1252 } 1273
1274 if (dev->d1_support || dev->d2_support)
1275 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1276 dev->d1_support ? " D1" : "",
1277 dev->d2_support ? " D2" : "");
1253 } 1278 }
1254 1279
1255 pmc &= PCI_PM_CAP_PME_MASK; 1280 pmc &= PCI_PM_CAP_PME_MASK;
1256 if (pmc) { 1281 if (pmc) {
1257 dev_printk(KERN_INFO, &dev->dev, 1282 dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
1258 "PME# supported from%s%s%s%s%s\n", 1283 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1259 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1284 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1260 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1285 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1261 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1286 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1262 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1287 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1263 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1264 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1288 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1265 /* 1289 /*
1266 * Make device's PM flags reflect the wake-up capability, but 1290 * Make device's PM flags reflect the wake-up capability, but
@@ -1275,6 +1299,38 @@ void pci_pm_init(struct pci_dev *dev)
1275 } 1299 }
1276} 1300}
1277 1301
1302/**
1303 * pci_enable_ari - enable ARI forwarding if hardware supports it
1304 * @dev: the PCI device
1305 */
1306void pci_enable_ari(struct pci_dev *dev)
1307{
1308 int pos;
1309 u32 cap;
1310 u16 ctrl;
1311
1312 if (!dev->is_pcie)
1313 return;
1314
1315 if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
1316 dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
1317 return;
1318
1319 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1320 if (!pos)
1321 return;
1322
1323 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
1324 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1325 return;
1326
1327 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1328 ctrl |= PCI_EXP_DEVCTL2_ARI;
1329 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1330
1331 dev->ari_enabled = 1;
1332}
1333
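ARI reuses the requester ID's device-number bits as extra function-number bits, so the port above the device has to be told to forward them; that is why the function bails out unless it is handed a root or downstream port. A sketch of a call site (the bridge variable is assumed):

    /* Sketch: enabling ARI forwarding on a downstream port during scan. */
    pci_enable_ari(bridge);
    if (pci_ari_enabled(bridge))
            dev_dbg(&bridge->dev, "ARI forwarding enabled\n");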
1278int 1334int
1279pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 1335pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1280{ 1336{
@@ -1358,11 +1414,10 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1358 return 0; 1414 return 0;
1359 1415
1360err_out: 1416err_out:
1361 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n", 1417 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
1362 bar, 1418 bar,
1363 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 1419 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1364 (unsigned long long)pci_resource_start(pdev, bar), 1420 &pdev->resource[bar]);
1365 (unsigned long long)pci_resource_end(pdev, bar));
1366 return -EBUSY; 1421 return -EBUSY;
1367} 1422}
1368 1423
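The %pR conversion used here is the printk extension for struct resource pointers; it prints the range in one specifier instead of two casts to unsigned long long. A minimal sketch, assuming a memory resource (the exact printed form varies by kernel version):

	struct resource r = {
		.start	= 0xfebf0000,
		.end	= 0xfebf0fff,
		.flags	= IORESOURCE_MEM,
	};

	printk(KERN_INFO "region: %pR\n", &r);	/* e.g. "region: [febf0000-febf0fff]" */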
@@ -1943,6 +1998,7 @@ EXPORT_SYMBOL(pci_restore_state);
1943EXPORT_SYMBOL(pci_pme_capable); 1998EXPORT_SYMBOL(pci_pme_capable);
1944EXPORT_SYMBOL(pci_pme_active); 1999EXPORT_SYMBOL(pci_pme_active);
1945EXPORT_SYMBOL(pci_enable_wake); 2000EXPORT_SYMBOL(pci_enable_wake);
2001EXPORT_SYMBOL(pci_wake_from_d3);
1946EXPORT_SYMBOL(pci_target_state); 2002EXPORT_SYMBOL(pci_target_state);
1947EXPORT_SYMBOL(pci_prepare_to_sleep); 2003EXPORT_SYMBOL(pci_prepare_to_sleep);
1948EXPORT_SYMBOL(pci_back_from_sleep); 2004EXPORT_SYMBOL(pci_back_from_sleep);
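The newly exported pci_wake_from_d3() (its tail is visible at the top of this section) arms PME# for D3cold when the device supports it and falls back to D3hot otherwise. A sketch of a legacy-PM suspend hook using it; the driver name is hypothetical and error handling is elided:

	static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, true);	/* wake from D3cold if possible */
		pci_set_power_state(pdev, PCI_D3hot);
		return 0;
	}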
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d807cd786f2..b205ab866a1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,3 +1,9 @@
1#ifndef DRIVERS_PCI_H
2#define DRIVERS_PCI_H
3
4#define PCI_CFG_SPACE_SIZE 256
5#define PCI_CFG_SPACE_EXP_SIZE 4096
6
1/* Functions internal to the PCI core code */ 7/* Functions internal to the PCI core code */
2 8
3extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); 9extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
@@ -76,7 +82,13 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
76/* Functions for PCI Hotplug drivers to use */ 82/* Functions for PCI Hotplug drivers to use */
77extern unsigned int pci_do_scan_bus(struct pci_bus *bus); 83extern unsigned int pci_do_scan_bus(struct pci_bus *bus);
78 84
85#ifdef HAVE_PCI_LEGACY
86extern void pci_create_legacy_files(struct pci_bus *bus);
79extern void pci_remove_legacy_files(struct pci_bus *bus); 87extern void pci_remove_legacy_files(struct pci_bus *bus);
88#else
89static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
90static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
91#endif
80 92
81/* Lock for read/write access to pci device and bus lists */ 93/* Lock for read/write access to pci device and bus lists */
82extern struct rw_semaphore pci_bus_sem; 94extern struct rw_semaphore pci_bus_sem;
@@ -109,6 +121,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
109extern int pcie_mch_quirk; 121extern int pcie_mch_quirk;
110extern struct device_attribute pci_dev_attrs[]; 122extern struct device_attribute pci_dev_attrs[];
111extern struct device_attribute dev_attr_cpuaffinity; 123extern struct device_attribute dev_attr_cpuaffinity;
124extern struct device_attribute dev_attr_cpulistaffinity;
112 125
113/** 126/**
114 * pci_match_one_device - Tell if a PCI device structure has a matching 127 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -144,3 +157,16 @@ struct pci_slot_attribute {
144}; 157};
145#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) 158#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
146 159
160extern void pci_enable_ari(struct pci_dev *dev);
161/**
162 * pci_ari_enabled - query ARI forwarding status
163 * @dev: the PCI device
164 *
165 * Returns 1 if ARI forwarding is enabled; otherwise 0.
166 */
167static inline int pci_ari_enabled(struct pci_dev *dev)
168{
169 return dev->ari_enabled;
170}
171
172#endif /* DRIVERS_PCI_H */
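pci_ari_enabled() gives core code a cheap flag test once pci_enable_ari() has run. A sketch of the kind of decision it enables during enumeration; the caller shown is hypothetical, not part of this patch:

	/*
	 * If the upstream bridge forwards ARI, scan all 256 function
	 * numbers on the child bus instead of the conventional 8.
	 */
	unsigned int max_fn = pci_ari_enabled(bridge) ? 256 : 8;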
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 77036f46acf..e390707661d 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -105,7 +105,7 @@ static irqreturn_t aer_irq(int irq, void *context)
105 unsigned long flags; 105 unsigned long flags;
106 int pos; 106 int pos;
107 107
108 pos = pci_find_aer_capability(pdev->port); 108 pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR);
109 /* 109 /*
110 * Must lock access to Root Error Status Reg, Root Error ID Reg, 110 * Must lock access to Root Error Status Reg, Root Error ID Reg,
111 * and Root error producer/consumer index 111 * and Root error producer/consumer index
@@ -252,7 +252,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
252 u32 status; 252 u32 status;
253 int pos; 253 int pos;
254 254
255 pos = pci_find_aer_capability(dev); 255 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
256 256
257 /* Disable Root's interrupt in response to error messages */ 257 /* Disable Root's interrupt in response to error messages */
258 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); 258 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0);
@@ -316,7 +316,7 @@ static void aer_error_resume(struct pci_dev *dev)
316 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); 316 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
317 317
318 /* Clean AER Root Error Status */ 318 /* Clean AER Root Error Status */
319 pos = pci_find_aer_capability(dev); 319 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
320 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 320 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
321 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 321 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
322 if (dev->error_state == pci_channel_io_normal) 322 if (dev->error_state == pci_channel_io_normal)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ee5e7b5176d..dfc63d01f20 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -28,41 +28,15 @@
28static int forceload; 28static int forceload;
29module_param(forceload, bool, 0); 29module_param(forceload, bool, 0);
30 30
31#define PCI_CFG_SPACE_SIZE (0x100)
32int pci_find_aer_capability(struct pci_dev *dev)
33{
34 int pos;
35 u32 reg32 = 0;
36
37 /* Check if it's a pci-express device */
38 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
39 if (!pos)
40 return 0;
41
42 /* Check if it supports pci-express AER */
43 pos = PCI_CFG_SPACE_SIZE;
44 while (pos) {
45 if (pci_read_config_dword(dev, pos, &reg32))
46 return 0;
47
48 /* some broken boards return ~0 */
49 if (reg32 == 0xffffffff)
50 return 0;
51
52 if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR)
53 break;
54
55 pos = reg32 >> 20;
56 }
57
58 return pos;
59}
60
61int pci_enable_pcie_error_reporting(struct pci_dev *dev) 31int pci_enable_pcie_error_reporting(struct pci_dev *dev)
62{ 32{
63 u16 reg16 = 0; 33 u16 reg16 = 0;
64 int pos; 34 int pos;
65 35
36 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
37 if (!pos)
38 return -EIO;
39
66 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 40 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
67 if (!pos) 41 if (!pos)
68 return -EIO; 42 return -EIO;
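The open-coded walk removed here duplicated what pci_find_ext_capability() already does in the PCI core. A simplified sketch of that walk, assuming the standard extended-capability header layout (ID in bits 15:0, next pointer in bits 31:20); the in-tree helper additionally bounds the loop so a malformed list cannot spin forever:

	static int find_ext_cap(struct pci_dev *dev, int cap)
	{
		int pos = PCI_CFG_SPACE_SIZE;	/* extended space starts at 0x100 */
		u32 header;

		while (pos) {
			if (pci_read_config_dword(dev, pos, &header))
				return 0;
			/* some broken boards return ~0 */
			if (header == 0 || header == 0xffffffff)
				return 0;
			if (PCI_EXT_CAP_ID(header) == cap)
				return pos;
			pos = PCI_EXT_CAP_NEXT(header);
		}
		return 0;
	}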
@@ -102,7 +76,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
102 int pos; 76 int pos;
103 u32 status, mask; 77 u32 status, mask;
104 78
105 pos = pci_find_aer_capability(dev); 79 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
106 if (!pos) 80 if (!pos)
107 return -EIO; 81 return -EIO;
108 82
@@ -123,7 +97,7 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
123 int pos; 97 int pos;
124 u32 status; 98 u32 status;
125 99
126 pos = pci_find_aer_capability(dev); 100 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
127 if (!pos) 101 if (!pos)
128 return -EIO; 102 return -EIO;
129 103
@@ -502,7 +476,7 @@ static void handle_error_source(struct pcie_device * aerdev,
502 * A correctable error does not need software intervention. 476 * A correctable error does not need software intervention.
503 * No need to go through the error recovery process. 477 * No need to go through the error recovery process.
504 */ 478 */
505 pos = pci_find_aer_capability(dev); 479 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
506 if (pos) 480 if (pos)
507 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 481 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
508 info.status); 482 info.status);
@@ -542,7 +516,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
542 reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); 516 reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
543 pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); 517 pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
544 518
545 aer_pos = pci_find_aer_capability(pdev); 519 aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
546 /* Clear error status */ 520 /* Clear error status */
547 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32); 521 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
548 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); 522 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
@@ -579,7 +553,7 @@ static void disable_root_aer(struct aer_rpc *rpc)
579 u32 reg32; 553 u32 reg32;
580 int pos; 554 int pos;
581 555
582 pos = pci_find_aer_capability(pdev); 556 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
583 /* Disable Root's interrupt in response to error messages */ 557 /* Disable Root's interrupt in response to error messages */
584 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); 558 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
585 559
@@ -618,7 +592,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
618{ 592{
619 int pos; 593 int pos;
620 594
621 pos = pci_find_aer_capability(dev); 595 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
622 596
623 /* The device might not support AER */ 597 /* The device might not support AER */
624 if (!pos) 598 if (!pos)
@@ -755,7 +729,6 @@ int aer_init(struct pcie_device *dev)
755 return AER_SUCCESS; 729 return AER_SUCCESS;
756} 730}
757 731
758EXPORT_SYMBOL_GPL(pci_find_aer_capability);
759EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); 732EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
760EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); 733EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
761EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); 734EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 851f5b83cdb..8f63f4c6b85 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -528,9 +528,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
528 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, 528 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
529 &reg32); 529 &reg32);
530 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { 530 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
531 printk("Pre-1.1 PCIe device detected, " 531 dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
532 "disable ASPM for %s. It can be enabled forcedly" 532 " on pre-1.1 PCIe device. You can enable it"
533 " with 'pcie_aspm=force'\n", pci_name(pdev)); 533 " with 'pcie_aspm=force'\n");
534 return -EINVAL; 534 return -EINVAL;
535 } 535 }
536 } 536 }
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 3656e0349dd..2529f3f2ea5 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,7 +25,6 @@
25#define PCIE_CAPABILITIES_REG 0x2 25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14 26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4 27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCI_CFG_SPACE_SIZE 256
29 28
30#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 29#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
31 30
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 890f0d2b370..2e091e01482 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -195,24 +195,11 @@ static int get_port_device_capability(struct pci_dev *dev)
195 /* PME Capable - root port capability */ 195 /* PME Capable - root port capability */
196 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT) 196 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
197 services |= PCIE_PORT_SERVICE_PME; 197 services |= PCIE_PORT_SERVICE_PME;
198 198
199 pos = PCI_CFG_SPACE_SIZE; 199 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
200 while (pos) { 200 services |= PCIE_PORT_SERVICE_AER;
201 pci_read_config_dword(dev, pos, &reg32); 201 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
202 switch (reg32 & 0xffff) { 202 services |= PCIE_PORT_SERVICE_VC;
203 case PCI_EXT_CAP_ID_ERR:
204 services |= PCIE_PORT_SERVICE_AER;
205 pos = reg32 >> 20;
206 break;
207 case PCI_EXT_CAP_ID_VC:
208 services |= PCIE_PORT_SERVICE_VC;
209 pos = reg32 >> 20;
210 break;
211 default:
212 pos = 0;
213 break;
214 }
215 }
216 203
217 return services; 204 return services;
218} 205}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 367c9c20000..584422da8d8 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
91 91
92 pci_set_master(dev); 92 pci_set_master(dev);
93 if (!dev->irq && dev->pin) { 93 if (!dev->irq && dev->pin) {
94 dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " 94 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
95 "check vendor BIOS\n", dev->vendor, dev->device); 95 "check vendor BIOS\n", dev->vendor, dev->device);
96 } 96 }
97 if (pcie_port_device_register(dev)) { 97 if (pcie_port_device_register(dev)) {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 36698e57b97..aaaf0a1fed2 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -14,8 +14,6 @@
14 14
15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
16#define CARDBUS_RESERVE_BUSNR 3 16#define CARDBUS_RESERVE_BUSNR 3
17#define PCI_CFG_SPACE_SIZE 256
18#define PCI_CFG_SPACE_EXP_SIZE 4096
19 17
20/* Ugh. Need to stop exporting this to modules. */ 18/* Ugh. Need to stop exporting this to modules. */
21LIST_HEAD(pci_root_buses); 19LIST_HEAD(pci_root_buses);
@@ -44,72 +42,6 @@ int no_pci_devices(void)
44} 42}
45EXPORT_SYMBOL(no_pci_devices); 43EXPORT_SYMBOL(no_pci_devices);
46 44
47#ifdef HAVE_PCI_LEGACY
48/**
49 * pci_create_legacy_files - create legacy I/O port and memory files
50 * @b: bus to create files under
51 *
52 * Some platforms allow access to legacy I/O port and ISA memory space on
53 * a per-bus basis. This routine creates the files and ties them into
54 * their associated read, write and mmap files from pci-sysfs.c
55 *
56 * On error unwind, but don't propagate the error to the caller
57 * as it is ok to set up the PCI bus without these files.
58 */
59static void pci_create_legacy_files(struct pci_bus *b)
60{
61 int error;
62
63 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
64 GFP_ATOMIC);
65 if (!b->legacy_io)
66 goto kzalloc_err;
67
68 b->legacy_io->attr.name = "legacy_io";
69 b->legacy_io->size = 0xffff;
70 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
71 b->legacy_io->read = pci_read_legacy_io;
72 b->legacy_io->write = pci_write_legacy_io;
73 error = device_create_bin_file(&b->dev, b->legacy_io);
74 if (error)
75 goto legacy_io_err;
76
77 /* Allocated above after the legacy_io struct */
78 b->legacy_mem = b->legacy_io + 1;
79 b->legacy_mem->attr.name = "legacy_mem";
80 b->legacy_mem->size = 1024*1024;
81 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
82 b->legacy_mem->mmap = pci_mmap_legacy_mem;
83 error = device_create_bin_file(&b->dev, b->legacy_mem);
84 if (error)
85 goto legacy_mem_err;
86
87 return;
88
89legacy_mem_err:
90 device_remove_bin_file(&b->dev, b->legacy_io);
91legacy_io_err:
92 kfree(b->legacy_io);
93 b->legacy_io = NULL;
94kzalloc_err:
95 printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
96 "and ISA memory resources to sysfs\n");
97 return;
98}
99
100void pci_remove_legacy_files(struct pci_bus *b)
101{
102 if (b->legacy_io) {
103 device_remove_bin_file(&b->dev, b->legacy_io);
104 device_remove_bin_file(&b->dev, b->legacy_mem);
105 kfree(b->legacy_io); /* both are allocated here */
106 }
107}
108#else /* !HAVE_PCI_LEGACY */
109static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
110void pci_remove_legacy_files(struct pci_bus *bus) { return; }
111#endif /* HAVE_PCI_LEGACY */
112
113/* 45/*
114 * PCI Bus Class Devices 46 * PCI Bus Class Devices
115 */ 47 */
@@ -219,7 +151,7 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
219 151
220 res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; 152 res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
221 153
222 if (res->flags == PCI_BASE_ADDRESS_MEM_TYPE_64) 154 if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
223 return pci_bar_mem64; 155 return pci_bar_mem64;
224 return pci_bar_mem32; 156 return pci_bar_mem32;
225} 157}
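This one-character change is a real bug fix: decode_bar() keeps the low BAR type bits in res->flags, so a 64-bit BAR that is also prefetchable never compares equal to PCI_BASE_ADDRESS_MEM_TYPE_64 alone and was being sized as 32-bit. Worked values (the register constants are real):

	u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64	/* 0x04 */
		  | PCI_BASE_ADDRESS_MEM_PREFETCH;	/* 0x08 */

	/* old test: flags == PCI_BASE_ADDRESS_MEM_TYPE_64 -> false (0x0c != 0x04) */
	/* new test: flags &  PCI_BASE_ADDRESS_MEM_TYPE_64 -> true */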
@@ -304,9 +236,8 @@ static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
304 } else { 236 } else {
305 res->start = l64; 237 res->start = l64;
306 res->end = l64 + sz64; 238 res->end = l64 + sz64;
307 printk(KERN_DEBUG "PCI: %s reg %x 64bit mmio: [%llx, %llx]\n", 239 dev_printk(KERN_DEBUG, &dev->dev,
308 pci_name(dev), pos, (unsigned long long)res->start, 240 "reg %x 64bit mmio: %pR\n", pos, res);
309 (unsigned long long)res->end);
310 } 241 }
311 } else { 242 } else {
312 sz = pci_size(l, sz, mask); 243 sz = pci_size(l, sz, mask);
@@ -316,9 +247,10 @@ static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
316 247
317 res->start = l; 248 res->start = l;
318 res->end = l + sz; 249 res->end = l + sz;
319 printk(KERN_DEBUG "PCI: %s reg %x %s: [%llx, %llx]\n", pci_name(dev), 250
320 pos, (res->flags & IORESOURCE_IO) ? "io port":"32bit mmio", 251 dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
321 (unsigned long long)res->start, (unsigned long long)res->end); 252 (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
253 res);
322 } 254 }
323 255
324 out: 256 out:
@@ -389,9 +321,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
389 res->start = base; 321 res->start = base;
390 if (!res->end) 322 if (!res->end)
391 res->end = limit + 0xfff; 323 res->end = limit + 0xfff;
392 printk(KERN_DEBUG "PCI: bridge %s io port: [%llx, %llx]\n", 324 dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
393 pci_name(dev), (unsigned long long) res->start,
394 (unsigned long long) res->end);
395 } 325 }
396 326
397 res = child->resource[1]; 327 res = child->resource[1];
@@ -403,9 +333,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
403 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; 333 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
404 res->start = base; 334 res->start = base;
405 res->end = limit + 0xfffff; 335 res->end = limit + 0xfffff;
406 printk(KERN_DEBUG "PCI: bridge %s 32bit mmio: [%llx, %llx]\n", 336 dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
407 pci_name(dev), (unsigned long long) res->start, 337 res);
408 (unsigned long long) res->end);
409 } 338 }
410 339
411 res = child->resource[2]; 340 res = child->resource[2];
@@ -441,9 +370,9 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
441 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; 370 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
442 res->start = base; 371 res->start = base;
443 res->end = limit + 0xfffff; 372 res->end = limit + 0xfffff;
444 printk(KERN_DEBUG "PCI: bridge %s %sbit mmio pref: [%llx, %llx]\n", 373 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
445 pci_name(dev), (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32", 374 (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
446 (unsigned long long) res->start, (unsigned long long) res->end); 375 res);
447 } 376 }
448} 377}
449 378
@@ -764,7 +693,7 @@ static int pci_setup_device(struct pci_dev * dev)
764 dev->class = class; 693 dev->class = class;
765 class >>= 8; 694 class >>= 8;
766 695
767 dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", 696 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
768 dev->vendor, dev->device, class, dev->hdr_type); 697 dev->vendor, dev->device, class, dev->hdr_type);
769 698
770 /* "Unknown power state" */ 699 /* "Unknown power state" */
@@ -846,6 +775,11 @@ static int pci_setup_device(struct pci_dev * dev)
846 return 0; 775 return 0;
847} 776}
848 777
778static void pci_release_capabilities(struct pci_dev *dev)
779{
780 pci_vpd_release(dev);
781}
782
849/** 783/**
850 * pci_release_dev - free a pci device structure when all users of it are finished. 784 * pci_release_dev - free a pci device structure when all users of it are finished.
851 * @dev: device that's been disconnected 785 * @dev: device that's been disconnected
@@ -858,7 +792,7 @@ static void pci_release_dev(struct device *dev)
858 struct pci_dev *pci_dev; 792 struct pci_dev *pci_dev;
859 793
860 pci_dev = to_pci_dev(dev); 794 pci_dev = to_pci_dev(dev);
861 pci_vpd_release(pci_dev); 795 pci_release_capabilities(pci_dev);
862 kfree(pci_dev); 796 kfree(pci_dev);
863} 797}
864 798
@@ -889,8 +823,9 @@ static void set_pcie_port_type(struct pci_dev *pdev)
889int pci_cfg_space_size_ext(struct pci_dev *dev) 823int pci_cfg_space_size_ext(struct pci_dev *dev)
890{ 824{
891 u32 status; 825 u32 status;
826 int pos = PCI_CFG_SPACE_SIZE;
892 827
893 if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) 828 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
894 goto fail; 829 goto fail;
895 if (status == 0xffffffff) 830 if (status == 0xffffffff)
896 goto fail; 831 goto fail;
@@ -938,8 +873,6 @@ struct pci_dev *alloc_pci_dev(void)
938 873
939 INIT_LIST_HEAD(&dev->bus_list); 874 INIT_LIST_HEAD(&dev->bus_list);
940 875
941 pci_msi_init_pci_dev(dev);
942
943 return dev; 876 return dev;
944} 877}
945EXPORT_SYMBOL(alloc_pci_dev); 878EXPORT_SYMBOL(alloc_pci_dev);
@@ -951,6 +884,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
951static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 884static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
952{ 885{
953 struct pci_dev *dev; 886 struct pci_dev *dev;
887 struct pci_slot *slot;
954 u32 l; 888 u32 l;
955 u8 hdr_type; 889 u8 hdr_type;
956 int delay = 1; 890 int delay = 1;
@@ -999,6 +933,10 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
999 dev->error_state = pci_channel_io_normal; 933 dev->error_state = pci_channel_io_normal;
1000 set_pcie_port_type(dev); 934 set_pcie_port_type(dev);
1001 935
936 list_for_each_entry(slot, &bus->slots, list)
937 if (PCI_SLOT(devfn) == slot->number)
938 dev->slot = slot;
939
1002 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 940 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1003 set this higher, assuming the system even supports it. */ 941 set this higher, assuming the system even supports it. */
1004 dev->dma_mask = 0xffffffff; 942 dev->dma_mask = 0xffffffff;
@@ -1007,9 +945,22 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1007 return NULL; 945 return NULL;
1008 } 946 }
1009 947
948 return dev;
949}
950
951static void pci_init_capabilities(struct pci_dev *dev)
952{
953 /* MSI/MSI-X list */
954 pci_msi_init_pci_dev(dev);
955
956 /* Power Management */
957 pci_pm_init(dev);
958
959 /* Vital Product Data */
1010 pci_vpd_pci22_init(dev); 960 pci_vpd_pci22_init(dev);
1011 961
1012 return dev; 962 /* Alternative Routing-ID Forwarding */
963 pci_enable_ari(dev);
1013} 964}
1014 965
1015void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 966void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1028,8 +979,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1028 /* Fix up broken headers */ 979 /* Fix up broken headers */
1029 pci_fixup_device(pci_fixup_header, dev); 980 pci_fixup_device(pci_fixup_header, dev);
1030 981
1031 /* Initialize power management of the device */ 982 /* Initialize various capabilities */
1032 pci_pm_init(dev); 983 pci_init_capabilities(dev);
1033 984
1034 /* 985 /*
1035 * Add the device to our list of discovered devices 986 * Add the device to our list of discovered devices
@@ -1237,8 +1188,11 @@ EXPORT_SYMBOL(pci_scan_bridge);
1237EXPORT_SYMBOL_GPL(pci_scan_child_bus); 1188EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1238#endif 1189#endif
1239 1190
1240static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b) 1191static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1241{ 1192{
1193 const struct pci_dev *a = to_pci_dev(d_a);
1194 const struct pci_dev *b = to_pci_dev(d_b);
1195
1242 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; 1196 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1243 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; 1197 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
1244 1198
@@ -1251,50 +1205,7 @@ static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev
1251 return 0; 1205 return 0;
1252} 1206}
1253 1207
1254/*
1255 * Yes, this forcibly breaks the klist abstraction temporarily. It
1256 * just wants to sort the klist, not change reference counts and
1257 * take/drop locks rapidly in the process. It does all this while
1258 * holding the lock for the list, so objects can't otherwise be
1259 * added/removed while we're swizzling.
1260 */
1261static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
1262{
1263 struct list_head *pos;
1264 struct klist_node *n;
1265 struct device *dev;
1266 struct pci_dev *b;
1267
1268 list_for_each(pos, list) {
1269 n = container_of(pos, struct klist_node, n_node);
1270 dev = container_of(n, struct device, knode_bus);
1271 b = to_pci_dev(dev);
1272 if (pci_sort_bf_cmp(a, b) <= 0) {
1273 list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
1274 return;
1275 }
1276 }
1277 list_move_tail(&a->dev.knode_bus.n_node, list);
1278}
1279
1280void __init pci_sort_breadthfirst(void) 1208void __init pci_sort_breadthfirst(void)
1281{ 1209{
1282 LIST_HEAD(sorted_devices); 1210 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1283 struct list_head *pos, *tmp;
1284 struct klist_node *n;
1285 struct device *dev;
1286 struct pci_dev *pdev;
1287 struct klist *device_klist;
1288
1289 device_klist = bus_get_device_klist(&pci_bus_type);
1290
1291 spin_lock(&device_klist->k_lock);
1292 list_for_each_safe(pos, tmp, &device_klist->k_list) {
1293 n = container_of(pos, struct klist_node, n_node);
1294 dev = container_of(n, struct device, knode_bus);
1295 pdev = to_pci_dev(dev);
1296 pci_insertion_sort_klist(pdev, &sorted_devices);
1297 }
1298 list_splice(&sorted_devices, &device_klist->k_list);
1299 spin_unlock(&device_klist->k_lock);
1300} 1211}
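The hand-rolled klist insertion sort moves into the driver core as bus_sort_breadthfirst(), whose comparator works on plain struct device pointers, which is why pci_sort_bf_cmp() above now unwraps them with to_pci_dev(). Its prototype, as declared in the driver core of this era (quoted from memory, so treat the exact spelling as an assumption):

	void bus_sort_breadthfirst(struct bus_type *bus,
				   int (*compare)(const struct device *a,
						  const struct device *b));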
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 9236e7f869c..bbf66ea8fd8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,14 @@
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include "pci.h" 25#include "pci.h"
26 26
27int isa_dma_bridge_buggy;
28EXPORT_SYMBOL(isa_dma_bridge_buggy);
29int pci_pci_problems;
30EXPORT_SYMBOL(pci_pci_problems);
31int pcie_mch_quirk;
32EXPORT_SYMBOL(pcie_mch_quirk);
33
34#ifdef CONFIG_PCI_QUIRKS
27/* The Mellanox Tavor device gives false positive parity errors 35/* The Mellanox Tavor device gives false positive parity errors
28 * Mark this device with a broken_parity_status, to allow 36 * Mark this device with a broken_parity_status, to allow
29 * PCI scanning code to "skip" this now blacklisted device. 37 * PCI scanning code to "skip" this now blacklisted device.
@@ -62,8 +70,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
62 70
63 This appears to be BIOS not version dependent. So presumably there is a 71 This appears to be BIOS not version dependent. So presumably there is a
64 chipset level fix */ 72 chipset level fix */
65int isa_dma_bridge_buggy;
66EXPORT_SYMBOL(isa_dma_bridge_buggy);
67 73
68static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) 74static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
69{ 75{
@@ -84,9 +90,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_d
84DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); 90DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
85DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 91DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
86 92
87int pci_pci_problems;
88EXPORT_SYMBOL(pci_pci_problems);
89
90/* 93/*
91 * Chipsets where PCI->PCI transfers vanish or hang 94 * Chipsets where PCI->PCI transfers vanish or hang
92 */ 95 */
@@ -1362,9 +1365,6 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev)
1362DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); 1365DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
1363#endif 1366#endif
1364 1367
1365int pcie_mch_quirk;
1366EXPORT_SYMBOL(pcie_mch_quirk);
1367
1368static void __devinit quirk_pcie_mch(struct pci_dev *pdev) 1368static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
1369{ 1369{
1370 pcie_mch_quirk = 1; 1370 pcie_mch_quirk = 1;
@@ -1555,85 +1555,6 @@ static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
1555} 1555}
1556DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); 1556DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
1557 1557
1558static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
1559{
1560 while (f < end) {
1561 if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
1562 (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
1563#ifdef DEBUG
1564 dev_dbg(&dev->dev, "calling ");
1565 print_fn_descriptor_symbol("%s\n", f->hook);
1566#endif
1567 f->hook(dev);
1568 }
1569 f++;
1570 }
1571}
1572
1573extern struct pci_fixup __start_pci_fixups_early[];
1574extern struct pci_fixup __end_pci_fixups_early[];
1575extern struct pci_fixup __start_pci_fixups_header[];
1576extern struct pci_fixup __end_pci_fixups_header[];
1577extern struct pci_fixup __start_pci_fixups_final[];
1578extern struct pci_fixup __end_pci_fixups_final[];
1579extern struct pci_fixup __start_pci_fixups_enable[];
1580extern struct pci_fixup __end_pci_fixups_enable[];
1581extern struct pci_fixup __start_pci_fixups_resume[];
1582extern struct pci_fixup __end_pci_fixups_resume[];
1583extern struct pci_fixup __start_pci_fixups_resume_early[];
1584extern struct pci_fixup __end_pci_fixups_resume_early[];
1585extern struct pci_fixup __start_pci_fixups_suspend[];
1586extern struct pci_fixup __end_pci_fixups_suspend[];
1587
1588
1589void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1590{
1591 struct pci_fixup *start, *end;
1592
1593 switch(pass) {
1594 case pci_fixup_early:
1595 start = __start_pci_fixups_early;
1596 end = __end_pci_fixups_early;
1597 break;
1598
1599 case pci_fixup_header:
1600 start = __start_pci_fixups_header;
1601 end = __end_pci_fixups_header;
1602 break;
1603
1604 case pci_fixup_final:
1605 start = __start_pci_fixups_final;
1606 end = __end_pci_fixups_final;
1607 break;
1608
1609 case pci_fixup_enable:
1610 start = __start_pci_fixups_enable;
1611 end = __end_pci_fixups_enable;
1612 break;
1613
1614 case pci_fixup_resume:
1615 start = __start_pci_fixups_resume;
1616 end = __end_pci_fixups_resume;
1617 break;
1618
1619 case pci_fixup_resume_early:
1620 start = __start_pci_fixups_resume_early;
1621 end = __end_pci_fixups_resume_early;
1622 break;
1623
1624 case pci_fixup_suspend:
1625 start = __start_pci_fixups_suspend;
1626 end = __end_pci_fixups_suspend;
1627 break;
1628
1629 default:
1630 /* stupid compiler warning, you would think with an enum... */
1631 return;
1632 }
1633 pci_do_fixups(dev, start, end);
1634}
1635EXPORT_SYMBOL(pci_fixup_device);
1636
1637/* Enable 1k I/O space granularity on the Intel P64H2 */ 1558/* Enable 1k I/O space granularity on the Intel P64H2 */
1638static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) 1559static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
1639{ 1560{
@@ -2007,3 +1928,82 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2007 quirk_msi_intx_disable_bug); 1928 quirk_msi_intx_disable_bug);
2008 1929
2009#endif /* CONFIG_PCI_MSI */ 1930#endif /* CONFIG_PCI_MSI */
1931
1932static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
1933{
1934 while (f < end) {
1935 if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
1936 (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
1937 dev_dbg(&dev->dev, "calling %pF\n", f->hook);
1938 f->hook(dev);
1939 }
1940 f++;
1941 }
1942}
1943
1944extern struct pci_fixup __start_pci_fixups_early[];
1945extern struct pci_fixup __end_pci_fixups_early[];
1946extern struct pci_fixup __start_pci_fixups_header[];
1947extern struct pci_fixup __end_pci_fixups_header[];
1948extern struct pci_fixup __start_pci_fixups_final[];
1949extern struct pci_fixup __end_pci_fixups_final[];
1950extern struct pci_fixup __start_pci_fixups_enable[];
1951extern struct pci_fixup __end_pci_fixups_enable[];
1952extern struct pci_fixup __start_pci_fixups_resume[];
1953extern struct pci_fixup __end_pci_fixups_resume[];
1954extern struct pci_fixup __start_pci_fixups_resume_early[];
1955extern struct pci_fixup __end_pci_fixups_resume_early[];
1956extern struct pci_fixup __start_pci_fixups_suspend[];
1957extern struct pci_fixup __end_pci_fixups_suspend[];
1958
1959
1960void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1961{
1962 struct pci_fixup *start, *end;
1963
1964 switch(pass) {
1965 case pci_fixup_early:
1966 start = __start_pci_fixups_early;
1967 end = __end_pci_fixups_early;
1968 break;
1969
1970 case pci_fixup_header:
1971 start = __start_pci_fixups_header;
1972 end = __end_pci_fixups_header;
1973 break;
1974
1975 case pci_fixup_final:
1976 start = __start_pci_fixups_final;
1977 end = __end_pci_fixups_final;
1978 break;
1979
1980 case pci_fixup_enable:
1981 start = __start_pci_fixups_enable;
1982 end = __end_pci_fixups_enable;
1983 break;
1984
1985 case pci_fixup_resume:
1986 start = __start_pci_fixups_resume;
1987 end = __end_pci_fixups_resume;
1988 break;
1989
1990 case pci_fixup_resume_early:
1991 start = __start_pci_fixups_resume_early;
1992 end = __end_pci_fixups_resume_early;
1993 break;
1994
1995 case pci_fixup_suspend:
1996 start = __start_pci_fixups_suspend;
1997 end = __end_pci_fixups_suspend;
1998 break;
1999
2000 default:
2001 /* stupid compiler warning, you would think with an enum... */
2002 return;
2003 }
2004 pci_do_fixups(dev, start, end);
2005}
2006#else
2007void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
2008#endif
2009EXPORT_SYMBOL(pci_fixup_device);
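With the table-walking machinery now compiled out under !CONFIG_PCI_QUIRKS (pci_fixup_device() becomes an empty stub), registering a quirk still looks the same to its author. A sketch with hypothetical vendor/device IDs:

	static void __devinit quirk_example(struct pci_dev *dev)
	{
		dev_info(&dev->dev, "example quirk applied\n");
	}
	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);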
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index bdc2a44d68e..042e0892442 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -73,6 +73,7 @@ void pci_remove_bus(struct pci_bus *pci_bus)
73 up_write(&pci_bus_sem); 73 up_write(&pci_bus_sem);
74 pci_remove_legacy_files(pci_bus); 74 pci_remove_legacy_files(pci_bus);
75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); 75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
76 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
76 device_unregister(&pci_bus->dev); 77 device_unregister(&pci_bus->dev);
77} 78}
78EXPORT_SYMBOL(pci_remove_bus); 79EXPORT_SYMBOL(pci_remove_bus);
@@ -114,13 +115,9 @@ void pci_remove_behind_bridge(struct pci_dev *dev)
114{ 115{
115 struct list_head *l, *n; 116 struct list_head *l, *n;
116 117
117 if (dev->subordinate) { 118 if (dev->subordinate)
118 list_for_each_safe(l, n, &dev->subordinate->devices) { 119 list_for_each_safe(l, n, &dev->subordinate->devices)
119 struct pci_dev *dev = pci_dev_b(l); 120 pci_remove_bus_device(pci_dev_b(l));
120
121 pci_remove_bus_device(dev);
122 }
123 }
124} 121}
125 122
126static void pci_stop_bus_devices(struct pci_bus *bus) 123static void pci_stop_bus_devices(struct pci_bus *bus)
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index bd5c0e03139..1f5f6143f35 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -21,7 +21,7 @@
21 * between the ROM and other resources, so enabling it may disable access 21 * between the ROM and other resources, so enabling it may disable access
22 * to MMIO registers or other card memory. 22 * to MMIO registers or other card memory.
23 */ 23 */
24static int pci_enable_rom(struct pci_dev *pdev) 24int pci_enable_rom(struct pci_dev *pdev)
25{ 25{
26 struct resource *res = pdev->resource + PCI_ROM_RESOURCE; 26 struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
27 struct pci_bus_region region; 27 struct pci_bus_region region;
@@ -45,7 +45,7 @@ static int pci_enable_rom(struct pci_dev *pdev)
45 * Disable ROM decoding on a PCI device by turning off the last bit in the 45 * Disable ROM decoding on a PCI device by turning off the last bit in the
46 * ROM BAR. 46 * ROM BAR.
47 */ 47 */
48static void pci_disable_rom(struct pci_dev *pdev) 48void pci_disable_rom(struct pci_dev *pdev)
49{ 49{
50 u32 rom_addr; 50 u32 rom_addr;
51 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); 51 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
@@ -260,3 +260,5 @@ void pci_cleanup_rom(struct pci_dev *pdev)
260 260
261EXPORT_SYMBOL(pci_map_rom); 261EXPORT_SYMBOL(pci_map_rom);
262EXPORT_SYMBOL(pci_unmap_rom); 262EXPORT_SYMBOL(pci_unmap_rom);
263EXPORT_SYMBOL_GPL(pci_enable_rom);
264EXPORT_SYMBOL_GPL(pci_disable_rom);
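pci_enable_rom()/pci_disable_rom() are now exported for drivers that need to control ROM decode directly; most callers still go through the mapping helpers, which toggle decode internally. The common pattern, error handling elided:

	size_t size;
	void __iomem *rom = pci_map_rom(pdev, &size);

	if (rom) {
		/* ... copy or parse the option ROM image ... */
		pci_unmap_rom(pdev, rom);
	}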
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 3abbfad9dda..ea979f2bc6d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -299,7 +299,7 @@ static void pbus_size_io(struct pci_bus *bus)
299 299
300 if (r->parent || !(r->flags & IORESOURCE_IO)) 300 if (r->parent || !(r->flags & IORESOURCE_IO))
301 continue; 301 continue;
302 r_size = r->end - r->start + 1; 302 r_size = resource_size(r);
303 303
304 if (r_size < 0x400) 304 if (r_size < 0x400)
305 /* Might be re-aligned for ISA */ 305 /* Might be re-aligned for ISA */
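resource_size() is the include/linux/ioport.h helper that encapsulates the inclusive-range arithmetic these hunks had been open-coding:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}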
@@ -350,16 +350,13 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
350 350
351 if (r->parent || (r->flags & mask) != type) 351 if (r->parent || (r->flags & mask) != type)
352 continue; 352 continue;
353 r_size = r->end - r->start + 1; 353 r_size = resource_size(r);
354 /* For bridges size != alignment */ 354 /* For bridges size != alignment */
355 align = resource_alignment(r); 355 align = resource_alignment(r);
356 order = __ffs(align) - 20; 356 order = __ffs(align) - 20;
357 if (order > 11) { 357 if (order > 11) {
358 dev_warn(&dev->dev, "BAR %d bad alignment %llx: " 358 dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
359 "%#016llx-%#016llx\n", i, 359 "%pR\n", i, (unsigned long long)align, r);
360 (unsigned long long)align,
361 (unsigned long long)r->start,
362 (unsigned long long)r->end);
363 r->flags = 0; 360 r->flags = 0;
364 continue; 361 continue;
365 } 362 }
@@ -378,11 +375,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
378 align = 0; 375 align = 0;
379 min_align = 0; 376 min_align = 0;
380 for (order = 0; order <= max_order; order++) { 377 for (order = 0; order <= max_order; order++) {
381#ifdef CONFIG_RESOURCES_64BIT 378 resource_size_t align1 = 1;
382 resource_size_t align1 = 1ULL << (order + 20); 379
383#else 380 align1 <<= (order + 20);
384 resource_size_t align1 = 1U << (order + 20); 381
385#endif
386 if (!align) 382 if (!align)
387 min_align = align1; 383 min_align = align1;
388 else if (ALIGN(align + min_align, min_align) < align1) 384 else if (ALIGN(align + min_align, min_align) < align1)
@@ -540,11 +536,9 @@ static void pci_bus_dump_res(struct pci_bus *bus)
540 if (!res) 536 if (!res)
541 continue; 537 continue;
542 538
543 printk(KERN_INFO "bus: %02x index %x %s: [%llx, %llx]\n", 539 printk(KERN_INFO "bus: %02x index %x %s: %pR\n",
544 bus->number, i, 540 bus->number, i,
545 (res->flags & IORESOURCE_IO) ? "io port" : "mmio", 541 (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res);
546 (unsigned long long) res->start,
547 (unsigned long long) res->end);
548 } 542 }
549} 543}
550 544
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1a5fc83c71b..2dbd96cce2d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -49,10 +49,8 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
49 49
50 pcibios_resource_to_bus(dev, &region, res); 50 pcibios_resource_to_bus(dev, &region, res);
51 51
52 dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] " 52 dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
53 "flags %#lx\n", resno, 53 "flags %#lx\n", resno, res,
54 (unsigned long long)res->start,
55 (unsigned long long)res->end,
56 (unsigned long long)region.start, 54 (unsigned long long)region.start,
57 (unsigned long long)region.end, 55 (unsigned long long)region.end,
58 (unsigned long)res->flags); 56 (unsigned long)res->flags);
@@ -114,13 +112,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
114 err = insert_resource(root, res); 112 err = insert_resource(root, res);
115 113
116 if (err) { 114 if (err) {
117 dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n", 115 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
118 resource, 116 resource,
119 root ? "address space collision on" : 117 root ? "address space collision on" :
120 "no parent found for", 118 "no parent found for",
121 dtype, 119 dtype, res);
122 (unsigned long long)res->start,
123 (unsigned long long)res->end);
124 } 120 }
125 121
126 return err; 122 return err;
@@ -133,15 +129,14 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
133 resource_size_t size, min, align; 129 resource_size_t size, min, align;
134 int ret; 130 int ret;
135 131
136 size = res->end - res->start + 1; 132 size = resource_size(res);
137 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 133 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
138 134
139 align = resource_alignment(res); 135 align = resource_alignment(res);
140 if (!align) { 136 if (!align) {
141 dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " 137 dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
142 "alignment) [%#llx-%#llx] flags %#lx\n", 138 "alignment) %pR flags %#lx\n",
143 resno, (unsigned long long)res->start, 139 resno, res, res->flags);
144 (unsigned long long)res->end, res->flags);
145 return -EINVAL; 140 return -EINVAL;
146 } 141 }
147 142
@@ -162,11 +157,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
162 } 157 }
163 158
164 if (ret) { 159 if (ret) {
165 dev_err(&dev->dev, "BAR %d: can't allocate %s resource " 160 dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
166 "[%#llx-%#llx]\n", resno, 161 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
167 res->flags & IORESOURCE_IO ? "I/O" : "mem",
168 (unsigned long long)res->start,
169 (unsigned long long)res->end);
170 } else { 162 } else {
171 res->flags &= ~IORESOURCE_STARTALIGN; 163 res->flags &= ~IORESOURCE_STARTALIGN;
172 if (resno < PCI_BRIDGE_RESOURCES) 164 if (resno < PCI_BRIDGE_RESOURCES)
@@ -202,11 +194,8 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
202 } 194 }
203 195
204 if (ret) { 196 if (ret) {
205 dev_err(&dev->dev, "BAR %d: can't allocate %s resource " 197 dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
206 "[%#llx-%#llx\n]", resno, 198 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
207 res->flags & IORESOURCE_IO ? "I/O" : "mem",
208 (unsigned long long)res->start,
209 (unsigned long long)res->end);
210 } else if (resno < PCI_BRIDGE_RESOURCES) { 199 } else if (resno < PCI_BRIDGE_RESOURCES) {
211 pci_update_resource(dev, res, resno); 200 pci_update_resource(dev, res, resno);
212 } 201 }
@@ -237,9 +226,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
237 r_align = resource_alignment(r); 226 r_align = resource_alignment(r);
238 if (!r_align) { 227 if (!r_align) {
239 dev_warn(&dev->dev, "BAR %d: bogus alignment " 228 dev_warn(&dev->dev, "BAR %d: bogus alignment "
240 "[%#llx-%#llx] flags %#lx\n", 229 "%pR flags %#lx\n",
241 i, (unsigned long long)r->start, 230 i, r, r->flags);
242 (unsigned long long)r->end, r->flags);
243 continue; 231 continue;
244 } 232 }
245 for (list = head; ; list = list->next) { 233 for (list = head; ; list = list->next) {
@@ -287,9 +275,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
287 275
288 if (!r->parent) { 276 if (!r->parent) {
289 dev_err(&dev->dev, "device not available because of " 277 dev_err(&dev->dev, "device not available because of "
290 "BAR %d [%#llx-%#llx] collisions\n", i, 278 "BAR %d %pR collisions\n", i, r);
291 (unsigned long long) r->start,
292 (unsigned long long) r->end);
293 return -EINVAL; 279 return -EINVAL;
294 } 280 }
295 281
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 7e5b85cbd94..0c6db03698e 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,11 +49,16 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
49 49
50static void pci_slot_release(struct kobject *kobj) 50static void pci_slot_release(struct kobject *kobj)
51{ 51{
52 struct pci_dev *dev;
52 struct pci_slot *slot = to_pci_slot(kobj); 53 struct pci_slot *slot = to_pci_slot(kobj);
53 54
54 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, 55 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
55 slot->bus->number, slot->number); 56 slot->bus->number, slot->number);
56 57
58 list_for_each_entry(dev, &slot->bus->devices, bus_list)
59 if (PCI_SLOT(dev->devfn) == slot->number)
60 dev->slot = NULL;
61
57 list_del(&slot->list); 62 list_del(&slot->list);
58 63
59 kfree(slot); 64 kfree(slot);
@@ -108,6 +113,7 @@ static struct kobj_type pci_slot_ktype = {
108struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 113struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
109 const char *name) 114 const char *name)
110{ 115{
116 struct pci_dev *dev;
111 struct pci_slot *slot; 117 struct pci_slot *slot;
112 int err; 118 int err;
113 119
@@ -150,6 +156,10 @@ placeholder:
150 INIT_LIST_HEAD(&slot->list); 156 INIT_LIST_HEAD(&slot->list);
151 list_add(&slot->list, &parent->slots); 157 list_add(&slot->list, &parent->slots);
152 158
159 list_for_each_entry(dev, &parent->devices, bus_list)
160 if (PCI_SLOT(dev->devfn) == slot_nr)
161 dev->slot = slot;
162
153 /* Don't care if debug printk has a -1 for slot_nr */ 163 /* Don't care if debug printk has a -1 for slot_nr */
154 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 164 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
155 __func__, pci_domain_nr(parent), parent->number, slot_nr); 165 __func__, pci_domain_nr(parent), parent->number, slot_nr);
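With pci_create_slot() and pci_slot_release() maintaining the dev->slot back-pointer, code holding a pci_dev can reach its physical slot without rescanning the bus. A hypothetical consumer sketch:

	if (pdev->slot)
		dev_info(&pdev->dev, "in physical slot %s\n",
			 kobject_name(&pdev->slot->kobj));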