Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/Makefile                     |    3
-rw-r--r--	drivers/pci/access.c                     |   19
-rw-r--r--	drivers/pci/bus.c                        |   18
-rw-r--r--	drivers/pci/dmar.c                       |  235
-rw-r--r--	drivers/pci/hotplug/Kconfig              |    4
-rw-r--r--	drivers/pci/hotplug/acpi_pcihp.c         |   40
-rw-r--r--	drivers/pci/hotplug/acpiphp_core.c       |    1
-rw-r--r--	drivers/pci/hotplug/acpiphp_glue.c       |   27
-rw-r--r--	drivers/pci/hotplug/cpci_hotplug_core.c  |    1
-rw-r--r--	drivers/pci/hotplug/cpqphp.h             |  167
-rw-r--r--	drivers/pci/hotplug/cpqphp_core.c        | 1100
-rw-r--r--	drivers/pci/hotplug/cpqphp_ctrl.c        |  371
-rw-r--r--	drivers/pci/hotplug/cpqphp_nvram.c       |   97
-rw-r--r--	drivers/pci/hotplug/cpqphp_pci.c         |  599
-rw-r--r--	drivers/pci/hotplug/ibmphp_core.c        |    2
-rw-r--r--	drivers/pci/hotplug/pci_hotplug_core.c   |  155
-rw-r--r--	drivers/pci/hotplug/pciehp.h             |    3
-rw-r--r--	drivers/pci/hotplug/pciehp_core.c        |  112
-rw-r--r--	drivers/pci/hotplug/pciehp_hpc.c         |   31
-rw-r--r--	drivers/pci/hotplug/pcihp_skeleton.c     |    1
-rw-r--r--	drivers/pci/hotplug/rpaphp_core.c        |    1
-rw-r--r--	drivers/pci/hotplug/sgi_hotplug.c        |    1
-rw-r--r--	drivers/pci/hotplug/shpchp_core.c        |    1
-rw-r--r--	drivers/pci/intel-iommu.c                |  752
-rw-r--r--	drivers/pci/intr_remapping.c             |  168
-rw-r--r--	drivers/pci/intr_remapping.h             |    2
-rw-r--r--	drivers/pci/iov.c                        |  161
-rw-r--r--	drivers/pci/msi.c                        |  100
-rw-r--r--	drivers/pci/msi.h                        |   14
-rw-r--r--	drivers/pci/pci.c                        |  246
-rw-r--r--	drivers/pci/pci.h                        |   39
-rw-r--r--	drivers/pci/pcie/aer/Kconfig             |   15
-rw-r--r--	drivers/pci/pcie/aer/Kconfig.debug       |   18
-rw-r--r--	drivers/pci/pcie/aer/Makefile            |    3
-rw-r--r--	drivers/pci/pcie/aer/aer_inject.c        |  473
-rw-r--r--	drivers/pci/pcie/aer/aerdrv.c            |    3
-rw-r--r--	drivers/pci/pcie/aer/aerdrv.h            |    6
-rw-r--r--	drivers/pci/pcie/aer/aerdrv_core.c       |  278
-rw-r--r--	drivers/pci/pcie/aer/ecrc.c              |  131
-rw-r--r--	drivers/pci/pcie/aspm.c                  |  787
-rw-r--r--	drivers/pci/probe.c                      |   11
-rw-r--r--	drivers/pci/quirks.c                     |   24
-rw-r--r--	drivers/pci/remove.c                     |    2
-rw-r--r--	drivers/pci/search.c                     |   32
-rw-r--r--	drivers/pci/setup-bus.c                  |   53
-rw-r--r--	drivers/pci/setup-res.c                  |   49
-rw-r--r--	drivers/pci/slot.c                       |   39
47 files changed, 3941 insertions, 2454 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index b77ae6794275..1ebd6b4c743b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
 # Makefile for the PCI bus specific drivers.
 #
 
-obj-y		+= access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
+obj-y		+= access.o bus.o probe.o remove.o pci.o quirks.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
 			irq.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
 
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0f3706512686..db23200c4874 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
 EXPORT_SYMBOL(pci_bus_write_config_word);
 EXPORT_SYMBOL(pci_bus_write_config_dword);
 
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus:	pci bus struct
+ * @ops:	new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+	struct pci_ops *old_ops;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_lock, flags);
+	old_ops = bus->ops;
+	bus->ops = ops;
+	spin_unlock_irqrestore(&pci_lock, flags);
+	return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
 
 /**
  * pci_read_vpd - Read one entry from Vital Product Data
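
For context, a minimal sketch of how a caller might use the new pci_bus_set_ops() to interpose on a bus's config accessors (the aer_inject module added elsewhere in this series does essentially this; the my_* names are illustrative, not kernel APIs):

#include <linux/pci.h>

static struct pci_ops *real_ops;	/* ops saved by the swap below */

static int my_read(struct pci_bus *bus, unsigned int devfn,
		   int where, int size, u32 *val)
{
	/* filter or log the access here, then forward to the real ops */
	return real_ops->read(bus, devfn, where, size, val);
}

static int my_write(struct pci_bus *bus, unsigned int devfn,
		    int where, int size, u32 val)
{
	return real_ops->write(bus, devfn, where, size, val);
}

static struct pci_ops my_ops = {
	.read	= my_read,
	.write	= my_write,
};

static void my_interpose(struct pci_bus *bus)
{
	/* pci_bus_set_ops() installs new ops and returns the old ones */
	real_ops = pci_bus_set_ops(bus, &my_ops);
}

static void my_restore(struct pci_bus *bus)
{
	pci_bus_set_ops(bus, real_ops);
}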
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 97a8194063b5..cef28a79103f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 		void *alignf_data)
 {
 	int i, ret = -ENOMEM;
+	resource_size_t max = -1;
 
 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
 
+	/* don't allocate too high if the pref mem doesn't support 64bit*/
+	if (!(res->flags & IORESOURCE_MEM_64))
+		max = PCIBIOS_MAX_MEM_32;
+
 	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
 		struct resource *r = bus->resource[i];
 		if (!r)
@@ -62,7 +67,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 		/* Ok, try it out.. */
 		ret = allocate_resource(r, res, size,
 					r->start ? : min,
-					-1, align,
+					max, align,
 					alignf, alignf_data);
 		if (ret == 0)
 			break;
@@ -201,13 +206,18 @@ void pci_enable_bridges(struct pci_bus *bus)
  * Walk the given bus, including any bridged devices
  * on buses under this bus. Call the provided callback
  * on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ *
  */
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 		  void *userdata)
 {
 	struct pci_dev *dev;
 	struct pci_bus *bus;
 	struct list_head *next;
+	int retval;
 
 	bus = top;
 	down_read(&pci_bus_sem);
@@ -231,8 +241,10 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
 
 		/* Run device routines with the device locked */
 		down(&dev->dev.sem);
-		cb(dev, userdata);
+		retval = cb(dev, userdata);
 		up(&dev->dev.sem);
+		if (retval)
+			break;
 	}
 	up_read(&pci_bus_sem);
 }
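
For context, a minimal sketch of a callback written against the new int-returning pci_walk_bus() signature; a non-zero return now ends the walk early (find_first_pcie_dev() and match_pcie_cb() are illustrative names, not kernel APIs):

#include <linux/pci.h>

static int match_pcie_cb(struct pci_dev *dev, void *userdata)
{
	struct pci_dev **found = userdata;

	if (dev->is_pcie) {
		*found = dev;
		return 1;	/* non-zero return stops the walk */
	}
	return 0;		/* keep walking */
}

static struct pci_dev *find_first_pcie_dev(struct pci_bus *bus)
{
	struct pci_dev *found = NULL;

	/* the callback runs with each device's lock held, as before */
	pci_walk_bus(bus, match_pcie_cb, &found);
	return found;	/* sketch only: no reference is taken here */
}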
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a11365ec3..7b287cb38b7a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
 	}
 	return ret;
 }
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+	if (!atsru)
+		return -ENOMEM;
+
+	atsru->hdr = hdr;
+	atsru->include_all = atsr->flags & 0x1;
+
+	list_add(&atsru->list, &dmar_atsr_units);
+
+	return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+	int rc;
+	struct acpi_dmar_atsr *atsr;
+
+	if (atsru->include_all)
+		return 0;
+
+	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+	rc = dmar_parse_dev_scope((void *)(atsr + 1),
+				(void *)atsr + atsr->header.length,
+				&atsru->devices_cnt, &atsru->devices,
+				atsr->segment);
+	if (rc || !atsru->devices_cnt) {
+		list_del(&atsru->list);
+		kfree(atsru);
+	}
+
+	return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+	int i;
+	struct pci_bus *bus;
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	list_for_each_entry(atsru, &dmar_atsr_units, list) {
+		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+		if (atsr->segment == pci_domain_nr(dev->bus))
+			goto found;
+	}
+
+	return 0;
+
+found:
+	for (bus = dev->bus; bus; bus = bus->parent) {
+		struct pci_dev *bridge = bus->self;
+
+		if (!bridge || !bridge->is_pcie ||
+		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+			return 0;
+
+		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+			for (i = 0; i < atsru->devices_cnt; i++)
+				if (atsru->devices[i] == bridge)
+					return 1;
+			break;
+		}
+	}
+
+	if (atsru->include_all)
+		return 1;
+
+	return 0;
+}
 #endif
 
 static void __init
@@ -274,22 +352,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 {
 	struct acpi_dmar_hardware_unit *drhd;
 	struct acpi_dmar_reserved_memory *rmrr;
+	struct acpi_dmar_atsr *atsr;
 
 	switch (header->type) {
 	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
-		drhd = (struct acpi_dmar_hardware_unit *)header;
+		drhd = container_of(header, struct acpi_dmar_hardware_unit,
+				    header);
 		printk (KERN_INFO PREFIX
-			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-			drhd->flags, (unsigned long long)drhd->address);
+			"DRHD base: %#016Lx flags: %#x\n",
+			(unsigned long long)drhd->address, drhd->flags);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-		rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+				    header);
 		printk (KERN_INFO PREFIX
-			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
+			"RMRR base: %#016Lx end: %#016Lx\n",
 			(unsigned long long)rmrr->base_address,
 			(unsigned long long)rmrr->end_address);
 		break;
+	case ACPI_DMAR_TYPE_ATSR:
+		atsr = container_of(header, struct acpi_dmar_atsr, header);
+		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+		break;
 	}
 }
 
@@ -363,6 +447,11 @@ parse_dmar_table(void)
 		ret = dmar_parse_one_rmrr(entry_header);
 #endif
 		break;
+	case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+		ret = dmar_parse_one_atsr(entry_header);
+#endif
+		break;
 	default:
 		printk(KERN_WARNING PREFIX
 			"Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@ int __init dmar_dev_scope_init(void)
 #ifdef CONFIG_DMAR
 	{
 		struct dmar_rmrr_unit *rmrr, *rmrr_n;
+		struct dmar_atsr_unit *atsr, *atsr_n;
+
 		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
 			ret = rmrr_parse_dev(rmrr);
 			if (ret)
 				return ret;
 		}
+
+		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+			ret = atsr_parse_dev(atsr);
+			if (ret)
+				return ret;
+		}
 	}
 #endif
 
@@ -468,6 +565,9 @@ int __init dmar_table_init(void)
 #ifdef CONFIG_DMAR
 	if (list_empty(&dmar_rmrr_units))
 		printk(KERN_INFO PREFIX "No RMRR found\n");
+
+	if (list_empty(&dmar_atsr_units))
+		printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
 #ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	u32 ver;
 	static int iommu_allocated = 0;
 	int agaw = 0;
+	int msagaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -535,12 +636,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
+			iommu->seq_id);
+		goto error;
+	}
+	msagaw = iommu_calculate_max_sagaw(iommu);
+	if (msagaw < 0) {
+		printk(KERN_ERR
+			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
 		goto error;
 	}
 #endif
 	iommu->agaw = agaw;
+	iommu->msagaw = msagaw;
 
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
  */
 static inline void reclaim_free_desc(struct q_inval *qi)
 {
-	while (qi->desc_status[qi->free_tail] == QI_DONE) {
+	while (qi->desc_status[qi->free_tail] == QI_DONE ||
+	       qi->desc_status[qi->free_tail] == QI_ABORT) {
 		qi->desc_status[qi->free_tail] = QI_FREE;
 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
 		qi->free_cnt++;
@@ -600,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 static int qi_check_fault(struct intel_iommu *iommu, int index)
 {
 	u32 fault;
-	int head;
+	int head, tail;
 	struct q_inval *qi = iommu->qi;
 	int wait_index = (index + 1) % QI_LENGTH;
 
+	if (qi->desc_status[wait_index] == QI_ABORT)
+		return -EAGAIN;
+
 	fault = readl(iommu->reg + DMAR_FSTS_REG);
 
 	/*
@@ -613,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	 */
 	if (fault & DMA_FSTS_IQE) {
 		head = readl(iommu->reg + DMAR_IQH_REG);
-		if ((head >> 4) == index) {
+		if ((head >> DMAR_IQ_SHIFT) == index) {
+			printk(KERN_ERR "VT-d detected invalid descriptor: "
+				"low=%llx, high=%llx\n",
+				(unsigned long long)qi->desc[index].low,
+				(unsigned long long)qi->desc[index].high);
 			memcpy(&qi->desc[index], &qi->desc[wait_index],
 				sizeof(struct qi_desc));
 			__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 		}
 	}
 
+	/*
+	 * If ITE happens, all pending wait_desc commands are aborted.
+	 * No new descriptors are fetched until the ITE is cleared.
+	 */
+	if (fault & DMA_FSTS_ITE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+		head |= 1;
+		tail = readl(iommu->reg + DMAR_IQT_REG);
+		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+		do {
+			if (qi->desc_status[head] == QI_IN_USE)
+				qi->desc_status[head] = QI_ABORT;
+			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+		} while (head != tail);
+
+		if (qi->desc_status[wait_index] == QI_ABORT)
+			return -EAGAIN;
+	}
+
+	if (fault & DMA_FSTS_ICE)
+		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
 	return 0;
 }
 
@@ -632,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
  */
 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
-	int rc = 0;
+	int rc;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
@@ -643,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw = qi->desc;
 
+restart:
+	rc = 0;
+
 	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
 		spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
-	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -685,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 */
 		rc = qi_check_fault(iommu, index);
 		if (rc)
-			goto out;
+			break;
 
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-out:
-	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
 
+	if (rc == -EAGAIN)
+		goto restart;
+
 	return rc;
 }
 
@@ -714,41 +863,26 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		     u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		      u64 type)
 {
 	struct qi_desc desc;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-		   unsigned int size_order, u64 type,
-		   int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+		    unsigned int size_order, u64 type)
 {
 	u8 dw = 0, dr = 0;
 
 	struct qi_desc desc;
 	int ih = 0;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	if (cap_write_drain(iommu->cap))
 		dw = 1;
 
@@ -760,7 +894,28 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+			u64 addr, unsigned mask)
+{
+	struct qi_desc desc;
+
+	if (mask) {
+		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+	} else
+		desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+		qdep = 0;
+
+	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+		   QI_DIOTLB_TYPE;
+
+	qi_submit_sync(&desc, iommu);
 }
 
 /*
@@ -790,7 +945,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 		cpu_relax();
 
 	iommu->gcmd &= ~DMA_GCMD_QIE;
-
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@ end:
  */
 static void __dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
+	u32 sts;
 	unsigned long flags;
 	struct q_inval *qi = iommu->qi;
 
@@ -818,9 +972,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 
 	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
 
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
 	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 		set_irq_data(irq, NULL);
 		iommu->irq = 0;
 		destroy_irq(irq);
-		return 0;
+		return ret;
 	}
 
 	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
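
For context, a standalone sketch of the address/size encoding the new qi_flush_dev_iotlb() performs: for a multi-page invalidation the base must be aligned to 2^mask pages, and the low bits are set to signal the region size to the device (the constants below mirror, but are not, the kernel's definitions; this compiles as ordinary user-space C):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12	/* 4KiB VT-d pages assumed */

/* Encode a device-IOTLB invalidation address covering 2^mask pages. */
static uint64_t encode_dev_iotlb_addr(uint64_t addr, unsigned mask)
{
	if (mask) {
		/* the base must be aligned to the invalidated region */
		assert((addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)) == 0);
		/* set the low bits to encode the region size */
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
	}
	return addr;
}

int main(void)
{
	/* invalidating 4 pages (mask = 2) at 0x40000 yields 0x41fff */
	printf("%#llx\n",
	       (unsigned long long)encode_dev_iotlb_addr(0x40000, 2));
	return 0;
}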
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe100a0d..66f29bc00be4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig HOTPLUG_PCI
 	tristate "Support for PCI Hotplug"
-	depends on PCI && HOTPLUG
+	depends on PCI && HOTPLUG && SYSFS
 	---help---
 	  Say Y here if you have a motherboard with a PCI Hotplug controller.
 	  This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@ config HOTPLUG_PCI_FAKE
 
 config HOTPLUG_PCI_COMPAQ
 	tristate "Compaq PCI Hotplug driver"
-	depends on X86 && PCI_BIOS && PCI_LEGACY
+	depends on X86 && PCI_BIOS
 	help
 	  Say Y here if you have a motherboard with a Compaq PCI Hotplug
 	  controller.
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index fbc63d5e459f..eb159587d0bf 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -354,7 +354,7 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 		status = acpi_run_hpp(handle, hpp);
 		if (ACPI_SUCCESS(status))
 			break;
-		if (acpi_root_bridge(handle))
+		if (acpi_is_root_bridge(handle))
 			break;
 		status = acpi_get_parent(handle, &phandle);
 		if (ACPI_FAILURE(status))
@@ -428,7 +428,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 		status = acpi_run_oshp(handle);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
-		if (acpi_root_bridge(handle))
+		if (acpi_is_root_bridge(handle))
 			break;
 		chandle = handle;
 		status = acpi_get_parent(chandle, &handle);
@@ -449,42 +449,6 @@ got_one:
 }
 EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
 
-/* acpi_root_bridge - check to see if this acpi object is a root bridge
- *
- * @handle - the acpi object in question.
- */
-int acpi_root_bridge(acpi_handle handle)
-{
-	acpi_status status;
-	struct acpi_device_info *info;
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	int i;
-
-	status = acpi_get_object_info(handle, &buffer);
-	if (ACPI_SUCCESS(status)) {
-		info = buffer.pointer;
-		if ((info->valid & ACPI_VALID_HID) &&
-			!strcmp(PCI_ROOT_HID_STRING,
-				info->hardware_id.value)) {
-			kfree(buffer.pointer);
-			return 1;
-		}
-		if (info->valid & ACPI_VALID_CID) {
-			for (i=0; i < info->compatibility_id.count; i++) {
-				if (!strcmp(PCI_ROOT_HID_STRING,
-					info->compatibility_id.id[i].value)) {
-					kfree(buffer.pointer);
-					return 1;
-				}
-			}
-		}
-		kfree(buffer.pointer);
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_root_bridge);
-
-
 static int is_ejectable(acpi_handle handle)
 {
 	acpi_status status;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd261b4..4dd7114964ac 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
 static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
 
 static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
-	.owner			= THIS_MODULE,
 	.enable_slot		= enable_slot,
 	.disable_slot		= disable_slot,
 	.set_attention_status	= set_attention_status,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3a6064bce561..0cb0f830a993 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -678,18 +678,9 @@ static void remove_bridge(acpi_handle handle)
 
 static struct pci_dev * get_apic_pci_info(acpi_handle handle)
 {
-	struct acpi_pci_id id;
-	struct pci_bus *bus;
 	struct pci_dev *dev;
 
-	if (ACPI_FAILURE(acpi_get_pci_id(handle, &id)))
-		return NULL;
-
-	bus = pci_find_bus(id.segment, id.bus);
-	if (!bus)
-		return NULL;
-
-	dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function));
+	dev = acpi_get_pci_dev(handle);
 	if (!dev)
 		return NULL;
 
@@ -1396,19 +1387,16 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
 /* Program resources in newly inserted bridge */
 static int acpiphp_configure_bridge (acpi_handle handle)
 {
-	struct acpi_pci_id pci_id;
+	struct pci_dev *dev;
 	struct pci_bus *bus;
 
-	if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) {
+	dev = acpi_get_pci_dev(handle);
+	if (!dev) {
 		err("cannot get PCI domain and bus number for bridge\n");
 		return -EINVAL;
 	}
-	bus = pci_find_bus(pci_id.segment, pci_id.bus);
-	if (!bus) {
-		err("cannot find bus %d:%d\n",
-			pci_id.segment, pci_id.bus);
-		return -EINVAL;
-	}
+
+	bus = dev->bus;
 
 	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
@@ -1416,6 +1404,7 @@ static int acpiphp_configure_bridge (acpi_handle handle)
 	acpiphp_set_hpp_values(handle, bus);
 	pci_enable_bridges(bus);
 	acpiphp_configure_ioapics(handle);
+	pci_dev_put(dev);
 	return 0;
 }
 
@@ -1631,7 +1620,7 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
 	int *count = (int *)context;
 
-	if (acpi_root_bridge(handle)) {
+	if (acpi_is_root_bridge(handle)) {
 		acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
 					handle_hotplug_event_bridge, NULL);
 		(*count)++;
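
For context, a minimal sketch of the calling convention this conversion relies on: acpi_get_pci_dev() replaces the old acpi_get_pci_id()/pci_find_bus()/pci_get_slot() dance and hands back a referenced struct pci_dev, so callers must balance it with pci_dev_put() (my_handle_to_bus() is an illustrative name, not a kernel API):

#include <linux/pci.h>
#include <acpi/acpi_bus.h>	/* assumed home of acpi_get_pci_dev() */

static int my_handle_to_bus(acpi_handle handle, struct pci_bus **busp)
{
	struct pci_dev *dev = acpi_get_pci_dev(handle);

	if (!dev)
		return -ENODEV;
	*busp = dev->bus;	/* the bus outlives this device reference */
	pci_dev_put(dev);	/* drop the reference acpi_get_pci_dev() took */
	return 0;
}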
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4feef8c..a5b9f6ae507b 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@ static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
 static int get_latch_status(struct hotplug_slot *slot, u8 * value);
 
 static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
-	.owner			= THIS_MODULE,
 	.enable_slot		= enable_slot,
 	.disable_slot		= disable_slot,
 	.set_attention_status	= set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f69f73e..53836001d511 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@ struct ctrl_reg { /* offset */
 
 /* offsets to the controller registers based on the above structure layout */
 enum ctrl_offsets {
 	SLOT_RST =		offsetof(struct ctrl_reg, slot_RST),
 	SLOT_ENABLE =		offsetof(struct ctrl_reg, slot_enable),
 	MISC =			offsetof(struct ctrl_reg, misc),
 	LED_CONTROL =		offsetof(struct ctrl_reg, led_control),
 	INT_INPUT_CLEAR =	offsetof(struct ctrl_reg, int_input_clear),
 	INT_MASK =		offsetof(struct ctrl_reg, int_mask),
 	CTRL_RESERVED0 =	offsetof(struct ctrl_reg, reserved0),
 	CTRL_RESERVED1 =	offsetof(struct ctrl_reg, reserved1),
 	CTRL_RESERVED2 =	offsetof(struct ctrl_reg, reserved1),
 	GEN_OUTPUT_AB =		offsetof(struct ctrl_reg, gen_output_AB),
 	NON_INT_INPUT =		offsetof(struct ctrl_reg, non_int_input),
 	CTRL_RESERVED3 =	offsetof(struct ctrl_reg, reserved3),
 	CTRL_RESERVED4 =	offsetof(struct ctrl_reg, reserved4),
 	CTRL_RESERVED5 =	offsetof(struct ctrl_reg, reserved5),
 	CTRL_RESERVED6 =	offsetof(struct ctrl_reg, reserved6),
 	CTRL_RESERVED7 =	offsetof(struct ctrl_reg, reserved7),
 	CTRL_RESERVED8 =	offsetof(struct ctrl_reg, reserved8),
 	SLOT_MASK =		offsetof(struct ctrl_reg, slot_mask),
 	CTRL_RESERVED9 =	offsetof(struct ctrl_reg, reserved9),
 	CTRL_RESERVED10 =	offsetof(struct ctrl_reg, reserved10),
 	CTRL_RESERVED11 =	offsetof(struct ctrl_reg, reserved11),
 	SLOT_SERR =		offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@ struct hrt {
 	u32 reserved2;
 } __attribute__ ((packed));
 
-/* offsets to the hotplug resource table registers based on the above structure layout */
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
 enum hrt_offsets {
 	SIG0 = offsetof(struct hrt, sig0),
 	SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@ struct slot_rt {
 	u16 pre_mem_length;
 } __attribute__ ((packed));
 
-/* offsets to the hotplug slot resource table registers based on the above structure layout */
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
 enum slot_rt_offsets {
 	DEV_FUNC = offsetof(struct slot_rt, dev_func),
 	PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
 	SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
 	MAX_BUS = offsetof(struct slot_rt, max_bus),
 	IO_BASE = offsetof(struct slot_rt, io_base),
 	IO_LENGTH = offsetof(struct slot_rt, io_length),
 	MEM_BASE = offsetof(struct slot_rt, mem_base),
 	MEM_LENGTH = offsetof(struct slot_rt, mem_length),
 	PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
 	PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
 };
 
 struct pci_func {
@@ -286,8 +290,8 @@ struct event_info {
 struct controller {
 	struct controller *next;
 	u32 ctrl_int_comp;
 	struct mutex crit_sect;	/* critical section mutex */
 	void __iomem *hpc_reg;	/* cookie for our pci controller location */
 	struct pci_resource *mem_head;
 	struct pci_resource *p_mem_head;
 	struct pci_resource *io_head;
@@ -299,7 +303,7 @@ struct controller {
 	u8 next_event;
 	u8 interrupt;
 	u8 cfgspc_irq;
 	u8 bus;			/* bus number for the pci hotplug controller */
 	u8 rev;
 	u8 slot_device_offset;
 	u8 first_slot;
@@ -401,46 +405,57 @@ struct resource_lists {
 
 
 /* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs (void);
-extern void cpqhp_shutdown_debugfs (void);
-extern void cpqhp_create_debugfs_files (struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
+extern void cpqhp_initialize_debugfs(void);
+extern void cpqhp_shutdown_debugfs(void);
+extern void cpqhp_create_debugfs_files(struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
 
 /* controller functions */
-extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data);
-extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start);
-extern int cpqhp_event_start_thread (void);
-extern void cpqhp_event_stop_thread (void);
-extern struct pci_func *cpqhp_slot_create (unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
-extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test (struct controller *ctrl, int test_num);
+extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
+extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+extern int cpqhp_find_available_resources(struct controller *ctrl,
+		void __iomem *rom_start);
+extern int cpqhp_event_start_thread(void);
+extern void cpqhp_event_stop_thread(void);
+extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+		unsigned char index);
+extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
 
 /* resource functions */
 extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
 
 /* pci functions */
-extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot);
-extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug);
-extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func);
-extern void cpqhp_destroy_board_resources (struct pci_func * func);
-extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void cpqhp_destroy_resource_list (struct resource_lists * resources);
-extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int cpqhp_unconfigure_device (struct pci_func* func);
+extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+		u8 slot);
+extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
+		int is_hot_plug);
+extern int cpqhp_save_base_addr_length(struct controller *ctrl,
+		struct pci_func *func);
+extern int cpqhp_save_used_resources(struct controller *ctrl,
+		struct pci_func *func);
+extern int cpqhp_configure_board(struct controller *ctrl,
+		struct pci_func *func);
+extern int cpqhp_save_slot_config(struct controller *ctrl,
+		struct pci_func *new_slot);
+extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+extern void cpqhp_destroy_board_resources(struct pci_func *func);
+extern int cpqhp_return_board_resources (struct pci_func *func,
+		struct resource_lists *resources);
+extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
+extern int cpqhp_configure_device(struct controller *ctrl,
+		struct pci_func *func);
+extern int cpqhp_unconfigure_device(struct pci_func *func);
 
 /* Global variables */
 extern int cpqhp_debug;
 extern int cpqhp_legacy_mode;
 extern struct controller *cpqhp_ctrl_list;
 extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
 
 /* these can be gotten rid of, but for debugging they are purty */
 extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@ extern u8 cpqhp_disk_irq;
 
 /* inline functions */
 
-static inline char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct slot *slot)
 {
 	return hotplug_slot_name(slot->hotplug_slot);
 }
@@ -458,9 +473,9 @@ static inline char *slot_name(struct slot *slot)
  * return_resource
  *
  * Puts node back in the resource list pointed to by head
- *
  */
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
+static inline void return_resource(struct pci_resource **head,
+		struct pci_resource *node)
 {
 	if (!node || !head)
 		return;
@@ -471,7 +486,7 @@ static inline void return_resource(struct pci_resource **head, struct pci_resour
 static inline void set_SOGO(struct controller *ctrl)
 {
 	u16 misc;
-	
+
 	misc = readw(ctrl->hpc_reg + MISC);
 	misc = (misc | 0x0001) & 0xFFFB;
 	writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@ static inline void set_SOGO(struct controller *ctrl)
 static inline void amber_LED_on(struct controller *ctrl, u8 slot)
 {
 	u32 led_control;
-	
+
 	led_control = readl(ctrl->hpc_reg + LED_CONTROL);
 	led_control |= (0x01010000L << slot);
 	writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@ static inline void amber_LED_on(struct controller *ctrl, u8 slot)
 static inline void amber_LED_off(struct controller *ctrl, u8 slot)
 {
 	u32 led_control;
-	
+
 	led_control = readl(ctrl->hpc_reg + LED_CONTROL);
 	led_control &= ~(0x01010000L << slot);
 	writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
 
 	led_control = readl(ctrl->hpc_reg + LED_CONTROL);
 	led_control &= (0x01010000L << slot);
-	
+
 	return led_control ? 1 : 0;
 }
 
@@ -512,7 +527,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
 static inline void green_LED_on(struct controller *ctrl, u8 slot)
 {
 	u32 led_control;
-	
+
 	led_control = readl(ctrl->hpc_reg + LED_CONTROL);
 	led_control |= 0x0101L << slot;
 	writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@ static inline void green_LED_on(struct controller *ctrl, u8 slot)
 static inline void green_LED_off(struct controller *ctrl, u8 slot)
 {
 	u32 led_control;
-	
+
 	led_control = readl(ctrl->hpc_reg + LED_CONTROL);
 	led_control &= ~(0x0101L << slot);
 	writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@ static inline void green_LED_off(struct controller *ctrl, u8 slot)
 static inline void green_LED_blink(struct controller *ctrl, u8 slot)
 {
 	u32 led_control;
-	
+
 	led_control = readl(ctrl->hpc_reg + LED_CONTROL);
 	led_control &= ~(0x0101L << slot);
 	led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@ static inline u8 read_slot_enable(struct controller *ctrl)
 }
 
 
-/*
+/**
  * get_controller_speed - find the current frequency/mode of controller.
  *
  * @ctrl: controller to get frequency/mode for.
  *
  * Returns controller speed.
- *
  */
 static inline u8 get_controller_speed(struct controller *ctrl)
 {
 	u8 curr_freq;
 	u16 misc;
 
 	if (ctrl->pcix_support) {
 		curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
 		if ((curr_freq & 0xB0) == 0xB0)
 			return PCI_SPEED_133MHz_PCIX;
 		if ((curr_freq & 0xA0) == 0xA0)
 			return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@ static inline u8 get_controller_speed(struct controller *ctrl)
 		return PCI_SPEED_33MHz;
 	}
 
 	misc = readw(ctrl->hpc_reg + MISC);
 	return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
 }
-
 
-/*
+
+/**
  * get_adapter_speed - find the max supported frequency/mode of adapter.
  *
  * @ctrl: hotplug controller.
  * @hp_slot: hotplug slot where adapter is installed.
  *
  * Returns adapter speed.
- *
  */
 static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
 {
@@ -672,7 +685,8 @@ static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
 }
 
 
-static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
+static inline int cpq_get_latch_status(struct controller *ctrl,
+		struct slot *slot)
 {
 	u32 status;
 	u8 hp_slot;
@@ -687,7 +701,8 @@ static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
 }
 
 
-static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
+static inline int get_presence_status(struct controller *ctrl,
+		struct slot *slot)
 {
 	int presence_save = 0;
 	u8 hp_slot;
@@ -696,7 +711,8 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
 	hp_slot = slot->device - ctrl->slot_device_offset;
 
 	tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
-	presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02;
+	presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+				>> hp_slot) & 0x02;
 
 	return presence_save;
 }
@@ -718,5 +734,12 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
 	return retval;
 }
 
-#endif
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+	BUG_ON(cpqhp_routing_table == NULL);
+	return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+		sizeof(struct irq_info));
+}
 
+#endif
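
For context, a standalone sketch of the arithmetic behind the new cpqhp_routing_table_length() helper: the BIOS $PIR routing table is a fixed-size header followed by 16-byte per-slot entries, so the entry count falls out of the total size (the sizes below are assumptions matching the PCI IRQ routing table layout, written as ordinary user-space C):

#include <stdio.h>

#define PIR_HEADER_SIZE	32	/* sizeof(struct irq_routing_table) assumed */
#define IRQ_INFO_SIZE	16	/* sizeof(struct irq_info) assumed */

static int routing_table_length(unsigned int table_size)
{
	return (table_size - PIR_HEADER_SIZE) / IRQ_INFO_SIZE;
}

int main(void)
{
	/* a 112-byte table holds (112 - 32) / 16 = 5 slot entries */
	printf("%d\n", routing_table_length(112));
	return 0;
}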
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcbb28a7..075b4f4b6e0d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
  * Send feedback to <greg@kroah.com>
  *
  * Jan 12, 2003 -	Added 66/100/133MHz PCI-X support,
  *			Torben Mathiasen <torben.mathiasen@hp.com>
- *
  */
 
 #include <linux/module.h>
@@ -45,7 +44,6 @@
 
 #include "cpqphp.h"
 #include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
 
 
 /* Global variables */
@@ -53,6 +51,7 @@ int cpqhp_debug;
 int cpqhp_legacy_mode;
 struct controller *cpqhp_ctrl_list;	/* = NULL */
 struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
 
 /* local variables */
 static void __iomem *smbios_table;
@@ -78,33 +77,6 @@ MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
 
 #define CPQHPC_MODULE_MINOR	208
 
-static int one_time_init	(void);
-static int set_attention_status	(struct hotplug_slot *slot, u8 value);
-static int process_SI		(struct hotplug_slot *slot);
-static int process_SS		(struct hotplug_slot *slot);
-static int hardware_test	(struct hotplug_slot *slot, u32 value);
-static int get_power_status	(struct hotplug_slot *slot, u8 *value);
-static int get_attention_status	(struct hotplug_slot *slot, u8 *value);
-static int get_latch_status	(struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status	(struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
-
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
-	.owner =		THIS_MODULE,
-	.set_attention_status =	set_attention_status,
-	.enable_slot =		process_SI,
-	.disable_slot =		process_SS,
-	.hardware_test =	hardware_test,
-	.get_power_status =	get_power_status,
-	.get_attention_status =	get_attention_status,
-	.get_latch_status =	get_latch_status,
-	.get_adapter_status =	get_adapter_status,
-	.get_max_bus_speed =	get_max_bus_speed,
-	.get_cur_bus_speed =	get_cur_bus_speed,
-};
-
-
 static inline int is_slot64bit(struct slot *slot)
 {
 	return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
 			break;
 		}
 	}
-	
+
 	if (!status)
 		fp = NULL;
 
@@ -171,7 +143,7 @@ static int init_SERR(struct controller * ctrl)
 	tempdword = ctrl->first_slot;
 
 	number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
-	// Loop through slots
+	/* Loop through slots */
 	while (number_of_slots) {
 		physical_slot = tempdword;
 		writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@ static int init_SERR(struct controller * ctrl)
 	return 0;
 }
 
-
-/* nice debugging output */
-static int pci_print_IRQ_route (void)
+static int init_cpqhp_routing_table(void)
 {
-	struct irq_routing_table *routing_table;
 	int len;
-	int loop;
-
-	u8 tbus, tdevice, tslot;
 
-	routing_table = pcibios_get_irq_routing_table();
-	if (routing_table == NULL) {
-		err("No BIOS Routing Table??? Not good\n");
+	cpqhp_routing_table = pcibios_get_irq_routing_table();
+	if (cpqhp_routing_table == NULL)
 		return -ENOMEM;
-	}
 
-	len = (routing_table->size - sizeof(struct irq_routing_table)) /
-			sizeof(struct irq_info);
-	// Make sure I got at least one entry
+	len = cpqhp_routing_table_length();
 	if (len == 0) {
-		kfree(routing_table);
+		kfree(cpqhp_routing_table);
+		cpqhp_routing_table = NULL;
 		return -1;
 	}
 
-	dbg("bus dev func slot\n");
+	return 0;
+}
+
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+	int len;
+	int loop;
+	u8 tbus, tdevice, tslot;
+
+	len = cpqhp_routing_table_length();
 
+	dbg("bus dev func slot\n");
 	for (loop = 0; loop < len; ++loop) {
-		tbus = routing_table->slots[loop].bus;
-		tdevice = routing_table->slots[loop].devfn;
-		tslot = routing_table->slots[loop].slot;
+		tbus = cpqhp_routing_table->slots[loop].bus;
+		tdevice = cpqhp_routing_table->slots[loop].devfn;
+		tslot = cpqhp_routing_table->slots[loop].slot;
 		dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
 
 	}
-	kfree(routing_table);
-	return 0;
+	return;
 }
 
 
@@ -242,9 +215,9 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
 	void __iomem *p_max;
 
 	if (!smbios_table || !curr)
-		return(NULL);
+		return NULL;
 
-	// set p_max to the end of the table
+	/* set p_max to the end of the table */
 	p_max = smbios_start + readw(smbios_table + ST_LENGTH);
 
 	p_temp = curr;
@@ -253,20 +226,19 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
 	while ((p_temp < p_max) && !bail) {
 		/* Look for the double NULL terminator
 		 * The first condition is the previous byte
-		 * and the second is the curr */
-		if (!previous_byte && !(readb(p_temp))) {
+		 * and the second is the curr
+		 */
+		if (!previous_byte && !(readb(p_temp)))
 			bail = 1;
-		}
 
 		previous_byte = readb(p_temp);
 		p_temp++;
 	}
 
-	if (p_temp < p_max) {
+	if (p_temp < p_max)
 		return p_temp;
-	} else {
+	else
 		return NULL;
-	}
 }
 
 
@@ -292,21 +264,18 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
292 if (!smbios_table) 264 if (!smbios_table)
293 return NULL; 265 return NULL;
294 266
295 if (!previous) { 267 if (!previous)
296 previous = smbios_start; 268 previous = smbios_start;
297 } else { 269 else
298 previous = get_subsequent_smbios_entry(smbios_start, 270 previous = get_subsequent_smbios_entry(smbios_start,
299 smbios_table, previous); 271 smbios_table, previous);
300 }
301 272
302 while (previous) { 273 while (previous)
303 if (readb(previous + SMBIOS_GENERIC_TYPE) != type) { 274 if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
304 previous = get_subsequent_smbios_entry(smbios_start, 275 previous = get_subsequent_smbios_entry(smbios_start,
305 smbios_table, previous); 276 smbios_table, previous);
306 } else { 277 else
307 break; 278 break;
308 }
309 }
310 279
311 return previous; 280 return previous;
312} 281}
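Both walkers above step over raw SMBIOS records: a small fixed header, a formatted area, then a string-set terminated by two consecutive NULs (the double-NULL terminator the scan loop looks for). An illustrative layout; the field names are mine, since the driver itself only uses byte offsets such as SMBIOS_GENERIC_TYPE:

	struct smbios_record {		/* illustrative layout only */
		u8  type;		/* read via SMBIOS_GENERIC_TYPE */
		u8  length;		/* formatted-area length */
		u16 handle;
		/* formatted fields, then "str1\0str2\0...\0\0" */
	};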
@@ -322,144 +291,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
322 kfree(slot); 291 kfree(slot);
323} 292}
324 293
325#define SLOT_NAME_SIZE 10
326
327static int ctrl_slot_setup(struct controller *ctrl,
328 void __iomem *smbios_start,
329 void __iomem *smbios_table)
330{
331 struct slot *slot;
332 struct hotplug_slot *hotplug_slot;
333 struct hotplug_slot_info *hotplug_slot_info;
334 u8 number_of_slots;
335 u8 slot_device;
336 u8 slot_number;
337 u8 ctrl_slot;
338 u32 tempdword;
339 char name[SLOT_NAME_SIZE];
340 void __iomem *slot_entry= NULL;
341 int result = -ENOMEM;
342
343 dbg("%s\n", __func__);
344
345 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
346
347 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
348 slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
349 slot_number = ctrl->first_slot;
350
351 while (number_of_slots) {
352 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
353 if (!slot)
354 goto error;
355
356 slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
357 GFP_KERNEL);
358 if (!slot->hotplug_slot)
359 goto error_slot;
360 hotplug_slot = slot->hotplug_slot;
361
362 hotplug_slot->info =
363 kzalloc(sizeof(*(hotplug_slot->info)),
364 GFP_KERNEL);
365 if (!hotplug_slot->info)
366 goto error_hpslot;
367 hotplug_slot_info = hotplug_slot->info;
368
369 slot->ctrl = ctrl;
370 slot->bus = ctrl->bus;
371 slot->device = slot_device;
372 slot->number = slot_number;
373 dbg("slot->number = %u\n", slot->number);
374
375 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
376 slot_entry);
377
378 while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
379 slot->number)) {
380 slot_entry = get_SMBIOS_entry(smbios_start,
381 smbios_table, 9, slot_entry);
382 }
383
384 slot->p_sm_slot = slot_entry;
385
386 init_timer(&slot->task_event);
387 slot->task_event.expires = jiffies + 5 * HZ;
388 slot->task_event.function = cpqhp_pushbutton_thread;
389
390 //FIXME: these capabilities aren't used but if they are
391 // they need to be correctly implemented
392 slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
393 slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
394
395 if (is_slot64bit(slot))
396 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
397 if (is_slot66mhz(slot))
398 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
399 if (ctrl->speed == PCI_SPEED_66MHz)
400 slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
401
402 ctrl_slot =
403 slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
404
405 // Check presence
406 slot->capabilities |=
407 ((((~tempdword) >> 23) |
408 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
409 // Check the switch state
410 slot->capabilities |=
411 ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
412 // Check the slot enable
413 slot->capabilities |=
414 ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
415
416 /* register this slot with the hotplug pci core */
417 hotplug_slot->release = &release_slot;
418 hotplug_slot->private = slot;
419 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
420 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
421
422 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
423 hotplug_slot_info->attention_status =
424 cpq_get_attention_status(ctrl, slot);
425 hotplug_slot_info->latch_status =
426 cpq_get_latch_status(ctrl, slot);
427 hotplug_slot_info->adapter_status =
428 get_presence_status(ctrl, slot);
429
430 dbg("registering bus %d, dev %d, number %d, "
431 "ctrl->slot_device_offset %d, slot %d\n",
432 slot->bus, slot->device,
433 slot->number, ctrl->slot_device_offset,
434 slot_number);
435 result = pci_hp_register(hotplug_slot,
436 ctrl->pci_dev->bus,
437 slot->device,
438 name);
439 if (result) {
440 err("pci_hp_register failed with error %d\n", result);
441 goto error_info;
442 }
443
444 slot->next = ctrl->slot;
445 ctrl->slot = slot;
446
447 number_of_slots--;
448 slot_device++;
449 slot_number++;
450 }
451
452 return 0;
453error_info:
454 kfree(hotplug_slot_info);
455error_hpslot:
456 kfree(hotplug_slot);
457error_slot:
458 kfree(slot);
459error:
460 return result;
461}
462
463static int ctrl_slot_cleanup (struct controller * ctrl) 294static int ctrl_slot_cleanup (struct controller * ctrl)
464{ 295{
465 struct slot *old_slot, *next_slot; 296 struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
476 307
477 cpqhp_remove_debugfs_files(ctrl); 308 cpqhp_remove_debugfs_files(ctrl);
478 309
479 //Free IRQ associated with hot plug device 310 /* Free IRQ associated with hot plug device */
480 free_irq(ctrl->interrupt, ctrl); 311 free_irq(ctrl->interrupt, ctrl);
481 //Unmap the memory 312 /* Unmap the memory */
482 iounmap(ctrl->hpc_reg); 313 iounmap(ctrl->hpc_reg);
483 //Finally reclaim PCI mem 314 /* Finally reclaim PCI mem */
484 release_mem_region(pci_resource_start(ctrl->pci_dev, 0), 315 release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
485 pci_resource_len(ctrl->pci_dev, 0)); 316 pci_resource_len(ctrl->pci_dev, 0));
486 317
487 return(0); 318 return 0;
488} 319}
489 320
490 321
491//============================================================================ 322/**
492// function: get_slot_mapping 323 * get_slot_mapping - determine logical slot mapping for PCI device
493// 324 *
494// Description: Attempts to determine a logical slot mapping for a PCI 325 * Won't work for more than one PCI-PCI bridge in a slot.
495// device. Won't work for more than one PCI-PCI bridge 326 *
496// in a slot. 327 * @bus_num - bus number of PCI device
497// 328 * @dev_num - device number of PCI device
498// Input: u8 bus_num - bus number of PCI device 329 * @slot - Pointer to u8 where slot number will be returned
499// u8 dev_num - device number of PCI device 330 *
500// u8 *slot - Pointer to u8 where slot number will 331 * Output: SUCCESS or FAILURE
501// be returned 332 */
502//
503// Output: SUCCESS or FAILURE
504//=============================================================================
505static int 333static int
506get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot) 334get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
507{ 335{
508 struct irq_routing_table *PCIIRQRoutingInfoLength;
509 u32 work; 336 u32 work;
510 long len; 337 long len;
511 long loop; 338 long loop;
@@ -516,36 +343,25 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
516 343
517 bridgeSlot = 0xFF; 344 bridgeSlot = 0xFF;
518 345
519 PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table(); 346 len = cpqhp_routing_table_length();
520 if (!PCIIRQRoutingInfoLength)
521 return -1;
522
523 len = (PCIIRQRoutingInfoLength->size -
524 sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
525 // Make sure I got at least one entry
526 if (len == 0) {
527 kfree(PCIIRQRoutingInfoLength);
528 return -1;
529 }
530
531 for (loop = 0; loop < len; ++loop) { 347 for (loop = 0; loop < len; ++loop) {
532 tbus = PCIIRQRoutingInfoLength->slots[loop].bus; 348 tbus = cpqhp_routing_table->slots[loop].bus;
533 tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3; 349 tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
534 tslot = PCIIRQRoutingInfoLength->slots[loop].slot; 350 tslot = cpqhp_routing_table->slots[loop].slot;
535 351
536 if ((tbus == bus_num) && (tdevice == dev_num)) { 352 if ((tbus == bus_num) && (tdevice == dev_num)) {
537 *slot = tslot; 353 *slot = tslot;
538 kfree(PCIIRQRoutingInfoLength);
539 return 0; 354 return 0;
540 } else { 355 } else {
541 /* Did not get a match on the target PCI device. Check 356 /* Did not get a match on the target PCI device. Check
542 * if the current IRQ table entry is a PCI-to-PCI bridge 357 * if the current IRQ table entry is a PCI-to-PCI
543 * device. If so, and its secondary bus matches the 358 * bridge device. If so, and its secondary bus
544 * bus number for the target device, I need to save the 359 * matches the bus number for the target device, I need
545 * bridge's slot number. If I can not find an entry for 360 * to save the bridge's slot number. If I can not find
546 * the target device, I will have to assume it's on the 361 * an entry for the target device, I will have to
547 * other side of the bridge, and assign it the bridge's 362 * assume it's on the other side of the bridge, and
548 * slot. */ 363 * assign it the bridge's slot.
364 */
549 bus->number = tbus; 365 bus->number = tbus;
550 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0), 366 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
551 PCI_CLASS_REVISION, &work); 367 PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
555 PCI_DEVFN(tdevice, 0), 371 PCI_DEVFN(tdevice, 0),
556 PCI_PRIMARY_BUS, &work); 372 PCI_PRIMARY_BUS, &work);
557 // See if bridge's secondary bus matches target bus. 373 // See if bridge's secondary bus matches target bus.
558 if (((work >> 8) & 0x000000FF) == (long) bus_num) { 374 if (((work >> 8) & 0x000000FF) == (long) bus_num)
559 bridgeSlot = tslot; 375 bridgeSlot = tslot;
560 }
561 } 376 }
562 } 377 }
563 378
564 } 379 }
565 380
566 // If we got here, we didn't find an entry in the IRQ mapping table 381 /* If we got here, we didn't find an entry in the IRQ mapping table for
567 // for the target PCI device. If we did determine that the target 382 * the target PCI device. If we did determine that the target device
568 // device is on the other side of a PCI-to-PCI bridge, return the 383 * is on the other side of a PCI-to-PCI bridge, return the slot number
569 // slot number for the bridge. 384 * for the bridge.
385 */
570 if (bridgeSlot != 0xFF) { 386 if (bridgeSlot != 0xFF) {
571 *slot = bridgeSlot; 387 *slot = bridgeSlot;
572 kfree(PCIIRQRoutingInfoLength);
573 return 0; 388 return 0;
574 } 389 }
575 kfree(PCIIRQRoutingInfoLength); 390 /* Couldn't find an entry in the routing table for this PCI device */
576 // Couldn't find an entry in the routing table for this PCI device
577 return -1; 391 return -1;
578} 392}
579 393
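A minimal usage sketch for the rewritten get_slot_mapping(); the bus/device numbers are made up, and ctrl->pci_bus is the scratch pci_bus the probe code sets up (note the function overwrites bus->number as it walks the table):

	u8 phys_slot;

	if (get_slot_mapping(ctrl->pci_bus, 2, 4, &phys_slot) == 0)
		dbg("bus 2 dev 4 -> physical slot %u\n", phys_slot);
	else
		dbg("bus 2 dev 4 absent from the routing table\n");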
@@ -591,32 +405,32 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
591 u8 hp_slot; 405 u8 hp_slot;
592 406
593 if (func == NULL) 407 if (func == NULL)
594 return(1); 408 return 1;
595 409
596 hp_slot = func->device - ctrl->slot_device_offset; 410 hp_slot = func->device - ctrl->slot_device_offset;
597 411
598 // Wait for exclusive access to hardware 412 /* Wait for exclusive access to hardware */
599 mutex_lock(&ctrl->crit_sect); 413 mutex_lock(&ctrl->crit_sect);
600 414
601 if (status == 1) { 415 if (status == 1)
602 amber_LED_on (ctrl, hp_slot); 416 amber_LED_on (ctrl, hp_slot);
603 } else if (status == 0) { 417 else if (status == 0)
604 amber_LED_off (ctrl, hp_slot); 418 amber_LED_off (ctrl, hp_slot);
605 } else { 419 else {
606 // Done with exclusive hardware access 420 /* Done with exclusive hardware access */
607 mutex_unlock(&ctrl->crit_sect); 421 mutex_unlock(&ctrl->crit_sect);
608 return(1); 422 return 1;
609 } 423 }
610 424
611 set_SOGO(ctrl); 425 set_SOGO(ctrl);
612 426
613 // Wait for SOBS to be unset 427 /* Wait for SOBS to be unset */
614 wait_for_ctrl_irq (ctrl); 428 wait_for_ctrl_irq (ctrl);
615 429
616 // Done with exclusive hardware access 430 /* Done with exclusive hardware access */
617 mutex_unlock(&ctrl->crit_sect); 431 mutex_unlock(&ctrl->crit_sect);
618 432
619 return(0); 433 return 0;
620} 434}
621 435
622 436
@@ -719,7 +533,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
719 533
720 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 534 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
721 535
722 return cpqhp_hardware_test(ctrl, value); 536 return cpqhp_hardware_test(ctrl, value);
723} 537}
724 538
725 539
@@ -738,7 +552,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
738{ 552{
739 struct slot *slot = hotplug_slot->private; 553 struct slot *slot = hotplug_slot->private;
740 struct controller *ctrl = slot->ctrl; 554 struct controller *ctrl = slot->ctrl;
741 555
742 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 556 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
743 557
744 *value = cpq_get_attention_status(ctrl, slot); 558 *value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
793 return 0; 607 return 0;
794} 608}
795 609
610static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
611 .set_attention_status = set_attention_status,
612 .enable_slot = process_SI,
613 .disable_slot = process_SS,
614 .hardware_test = hardware_test,
615 .get_power_status = get_power_status,
616 .get_attention_status = get_attention_status,
617 .get_latch_status = get_latch_status,
618 .get_adapter_status = get_adapter_status,
619 .get_max_bus_speed = get_max_bus_speed,
620 .get_cur_bus_speed = get_cur_bus_speed,
621};
622
623#define SLOT_NAME_SIZE 10
624
625static int ctrl_slot_setup(struct controller *ctrl,
626 void __iomem *smbios_start,
627 void __iomem *smbios_table)
628{
629 struct slot *slot;
630 struct hotplug_slot *hotplug_slot;
631 struct hotplug_slot_info *hotplug_slot_info;
632 u8 number_of_slots;
633 u8 slot_device;
634 u8 slot_number;
635 u8 ctrl_slot;
636 u32 tempdword;
637 char name[SLOT_NAME_SIZE];
638 void __iomem *slot_entry= NULL;
639 int result = -ENOMEM;
640
641 dbg("%s\n", __func__);
642
643 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
644
645 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
646 slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
647 slot_number = ctrl->first_slot;
648
649 while (number_of_slots) {
650 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
651 if (!slot)
652 goto error;
653
654 slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
655 GFP_KERNEL);
656 if (!slot->hotplug_slot)
657 goto error_slot;
658 hotplug_slot = slot->hotplug_slot;
659
660 hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
661 GFP_KERNEL);
662 if (!hotplug_slot->info)
663 goto error_hpslot;
664 hotplug_slot_info = hotplug_slot->info;
665
666 slot->ctrl = ctrl;
667 slot->bus = ctrl->bus;
668 slot->device = slot_device;
669 slot->number = slot_number;
670 dbg("slot->number = %u\n", slot->number);
671
672 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
673 slot_entry);
674
675 while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
676 slot->number)) {
677 slot_entry = get_SMBIOS_entry(smbios_start,
678 smbios_table, 9, slot_entry);
679 }
680
681 slot->p_sm_slot = slot_entry;
682
683 init_timer(&slot->task_event);
684 slot->task_event.expires = jiffies + 5 * HZ;
685 slot->task_event.function = cpqhp_pushbutton_thread;
686
687 /*FIXME: these capabilities aren't used but if they are
688 * they need to be correctly implemented
689 */
690 slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
691 slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
692
693 if (is_slot64bit(slot))
694 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
695 if (is_slot66mhz(slot))
696 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
697 if (ctrl->speed == PCI_SPEED_66MHz)
698 slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
699
700 ctrl_slot =
701 slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
702
703 /* Check presence */
704 slot->capabilities |=
705 ((((~tempdword) >> 23) |
706 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
707 /* Check the switch state */
708 slot->capabilities |=
709 ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
710 /* Check the slot enable */
711 slot->capabilities |=
712 ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
713
714 /* register this slot with the hotplug pci core */
715 hotplug_slot->release = &release_slot;
716 hotplug_slot->private = slot;
717 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
718 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
719
720 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
721 hotplug_slot_info->attention_status =
722 cpq_get_attention_status(ctrl, slot);
723 hotplug_slot_info->latch_status =
724 cpq_get_latch_status(ctrl, slot);
725 hotplug_slot_info->adapter_status =
726 get_presence_status(ctrl, slot);
727
728 dbg("registering bus %d, dev %d, number %d, "
729 "ctrl->slot_device_offset %d, slot %d\n",
730 slot->bus, slot->device,
731 slot->number, ctrl->slot_device_offset,
732 slot_number);
733 result = pci_hp_register(hotplug_slot,
734 ctrl->pci_dev->bus,
735 slot->device,
736 name);
737 if (result) {
738 err("pci_hp_register failed with error %d\n", result);
739 goto error_info;
740 }
741
742 slot->next = ctrl->slot;
743 ctrl->slot = slot;
744
745 number_of_slots--;
746 slot_device++;
747 slot_number++;
748 }
749
750 return 0;
751error_info:
752 kfree(hotplug_slot_info);
753error_hpslot:
754 kfree(hotplug_slot);
755error_slot:
756 kfree(slot);
757error:
758 return result;
759}
760
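ctrl_slot_setup() pushes each slot onto a singly linked list headed at ctrl->slot; ctrl_slot_cleanup() in the earlier hunk walks the same list to tear it down. A sketch of iterating it:

	struct slot *s;

	for (s = ctrl->slot; s; s = s->next)
		dbg("slot %u: bus %u device %u\n", s->number, s->bus, s->device);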
761static int one_time_init(void)
762{
763 int loop;
764 int retval = 0;
765
766 if (initialized)
767 return 0;
768
769 power_mode = 0;
770
771 retval = init_cpqhp_routing_table();
772 if (retval)
773 goto error;
774
775 if (cpqhp_debug)
776 pci_print_IRQ_route();
777
778 dbg("Initialize + Start the notification mechanism \n");
779
780 retval = cpqhp_event_start_thread();
781 if (retval)
782 goto error;
783
784 dbg("Initialize slot lists\n");
785 for (loop = 0; loop < 256; loop++)
786 cpqhp_slot_list[loop] = NULL;
787
788 /* FIXME: We also need to hook the NMI handler eventually.
789 * this also needs to be worked with Christoph
790 * register_NMI_handler();
791 */
792 /* Map rom address */
793 cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
794 if (!cpqhp_rom_start) {
795 err ("Could not ioremap memory region for ROM\n");
796 retval = -EIO;
797 goto error;
798 }
799
800 /* Now, map the int15 entry point if we are on compaq specific
801 * hardware
802 */
803 compaq_nvram_init(cpqhp_rom_start);
804
805 /* Map smbios table entry point structure */
806 smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
807 cpqhp_rom_start + ROM_PHY_LEN);
808 if (!smbios_table) {
809 err ("Could not find the SMBIOS pointer in memory\n");
810 retval = -EIO;
811 goto error_rom_start;
812 }
813
814 smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
815 readw(smbios_table + ST_LENGTH));
816 if (!smbios_start) {
817 err ("Could not ioremap memory region taken from SMBIOS values\n");
818 retval = -EIO;
819 goto error_smbios_start;
820 }
821
822 initialized = 1;
823
824 return retval;
825
826error_smbios_start:
827 iounmap(smbios_start);
828error_rom_start:
829 iounmap(cpqhp_rom_start);
830error:
831 return retval;
832}
833
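Because one_time_init() bails out early once 'initialized' is set, every probe can call it unconditionally. A sketch of the expected call pattern inside cpqhpc_probe() (the actual call site and unwind label are outside this excerpt and assumed here):

	rc = one_time_init();
	if (rc)
		goto err_free_bus;	/* unwind label assumed */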
796static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 834static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
797{ 835{
798 u8 num_of_slots = 0; 836 u8 num_of_slots = 0;
@@ -815,7 +853,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
815 return err; 853 return err;
816 } 854 }
817 855
818 // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery 856 /* Need to read VID early b/c it's used to differentiate CPQ and INTC
857 * discovery
858 */
819 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id); 859 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
820 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) { 860 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
821 err(msg_HPC_non_compaq_or_intel); 861 err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
832 } 872 }
833 873
834 /* Check for the proper subsystem IDs 874 /* Check for the proper subsystem IDs
835 * Intel uses a different SSID programming model than Compaq. 875 * Intel uses a different SSID programming model than Compaq.
836 * For Intel, each SSID bit identifies a PHP capability. 876 * For Intel, each SSID bit identifies a PHP capability.
837 * Also Intel HPC's may have RID=0. 877 * Also Intel HPC's may have RID=0.
838 */ 878 */
839 if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) { 879 if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
840 // TODO: This code can be made to support non-Compaq or Intel subsystem IDs 880 err(msg_HPC_not_supported);
841 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid); 881 return -ENODEV;
842 if (rc) { 882 }
843 err("%s : pci_read_config_word failed\n", __func__);
844 goto err_disable_device;
845 }
846 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
847 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
848 err(msg_HPC_non_compaq_or_intel);
849 rc = -ENODEV;
850 goto err_disable_device;
851 }
852 883
853 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL); 884 /* TODO: This code can be made to support non-Compaq or Intel
854 if (!ctrl) { 885 * subsystem IDs
855 err("%s : out of memory\n", __func__); 886 */
856 rc = -ENOMEM; 887 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
857 goto err_disable_device; 888 if (rc) {
858 } 889 err("%s : pci_read_config_word failed\n", __func__);
890 goto err_disable_device;
891 }
892 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
893 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
894 err(msg_HPC_non_compaq_or_intel);
895 rc = -ENODEV;
896 goto err_disable_device;
897 }
859 898
860 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid); 899 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
861 if (rc) { 900 if (!ctrl) {
862 err("%s : pci_read_config_word failed\n", __func__); 901 err("%s : out of memory\n", __func__);
863 goto err_free_ctrl; 902 rc = -ENOMEM;
864 } 903 goto err_disable_device;
904 }
865 905
866 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid); 906 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
867 907 if (rc) {
868 /* Set Vendor ID, so it can be accessed later from other functions */ 908 err("%s : pci_read_config_word failed\n", __func__);
869 ctrl->vendor_id = vendor_id; 909 goto err_free_ctrl;
870 910 }
871 switch (subsystem_vid) {
872 case PCI_VENDOR_ID_COMPAQ:
873 if (pdev->revision >= 0x13) { /* CIOBX */
874 ctrl->push_flag = 1;
875 ctrl->slot_switch_type = 1;
876 ctrl->push_button = 1;
877 ctrl->pci_config_space = 1;
878 ctrl->defeature_PHP = 1;
879 ctrl->pcix_support = 1;
880 ctrl->pcix_speed_capability = 1;
881 pci_read_config_byte(pdev, 0x41, &bus_cap);
882 if (bus_cap & 0x80) {
883 dbg("bus max supports 133MHz PCI-X\n");
884 ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
885 break;
886 }
887 if (bus_cap & 0x40) {
888 dbg("bus max supports 100MHz PCI-X\n");
889 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
890 break;
891 }
892 if (bus_cap & 20) {
893 dbg("bus max supports 66MHz PCI-X\n");
894 ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
895 break;
896 }
897 if (bus_cap & 10) {
898 dbg("bus max supports 66MHz PCI\n");
899 ctrl->speed_capability = PCI_SPEED_66MHz;
900 break;
901 }
902
903 break;
904 }
905
906 switch (subsystem_deviceid) {
907 case PCI_SUB_HPC_ID:
908 /* Original 6500/7000 implementation */
909 ctrl->slot_switch_type = 1;
910 ctrl->speed_capability = PCI_SPEED_33MHz;
911 ctrl->push_button = 0;
912 ctrl->pci_config_space = 1;
913 ctrl->defeature_PHP = 1;
914 ctrl->pcix_support = 0;
915 ctrl->pcix_speed_capability = 0;
916 break;
917 case PCI_SUB_HPC_ID2:
918 /* First Pushbutton implementation */
919 ctrl->push_flag = 1;
920 ctrl->slot_switch_type = 1;
921 ctrl->speed_capability = PCI_SPEED_33MHz;
922 ctrl->push_button = 1;
923 ctrl->pci_config_space = 1;
924 ctrl->defeature_PHP = 1;
925 ctrl->pcix_support = 0;
926 ctrl->pcix_speed_capability = 0;
927 break;
928 case PCI_SUB_HPC_ID_INTC:
929 /* Third party (6500/7000) */
930 ctrl->slot_switch_type = 1;
931 ctrl->speed_capability = PCI_SPEED_33MHz;
932 ctrl->push_button = 0;
933 ctrl->pci_config_space = 1;
934 ctrl->defeature_PHP = 1;
935 ctrl->pcix_support = 0;
936 ctrl->pcix_speed_capability = 0;
937 break;
938 case PCI_SUB_HPC_ID3:
939 /* First 66 Mhz implementation */
940 ctrl->push_flag = 1;
941 ctrl->slot_switch_type = 1;
942 ctrl->speed_capability = PCI_SPEED_66MHz;
943 ctrl->push_button = 1;
944 ctrl->pci_config_space = 1;
945 ctrl->defeature_PHP = 1;
946 ctrl->pcix_support = 0;
947 ctrl->pcix_speed_capability = 0;
948 break;
949 case PCI_SUB_HPC_ID4:
950 /* First PCI-X implementation, 100MHz */
951 ctrl->push_flag = 1;
952 ctrl->slot_switch_type = 1;
953 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
954 ctrl->push_button = 1;
955 ctrl->pci_config_space = 1;
956 ctrl->defeature_PHP = 1;
957 ctrl->pcix_support = 1;
958 ctrl->pcix_speed_capability = 0;
959 break;
960 default:
961 err(msg_HPC_not_supported);
962 rc = -ENODEV;
963 goto err_free_ctrl;
964 }
965 break;
966 911
967 case PCI_VENDOR_ID_INTEL: 912 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
968 /* Check for speed capability (0=33, 1=66) */ 913
969 if (subsystem_deviceid & 0x0001) { 914 /* Set Vendor ID, so it can be accessed later from other
970 ctrl->speed_capability = PCI_SPEED_66MHz; 915 * functions
971 } else { 916 */
972 ctrl->speed_capability = PCI_SPEED_33MHz; 917 ctrl->vendor_id = vendor_id;
973 } 918
974 919 switch (subsystem_vid) {
975 /* Check for push button */ 920 case PCI_VENDOR_ID_COMPAQ:
976 if (subsystem_deviceid & 0x0002) { 921 if (pdev->revision >= 0x13) { /* CIOBX */
977 /* no push button */ 922 ctrl->push_flag = 1;
978 ctrl->push_button = 0; 923 ctrl->slot_switch_type = 1;
979 } else { 924 ctrl->push_button = 1;
980 /* push button supported */ 925 ctrl->pci_config_space = 1;
981 ctrl->push_button = 1; 926 ctrl->defeature_PHP = 1;
982 } 927 ctrl->pcix_support = 1;
983 928 ctrl->pcix_speed_capability = 1;
984 /* Check for slot switch type (0=mechanical, 1=not mechanical) */ 929 pci_read_config_byte(pdev, 0x41, &bus_cap);
985 if (subsystem_deviceid & 0x0004) { 930 if (bus_cap & 0x80) {
986 /* no switch */ 931 dbg("bus max supports 133MHz PCI-X\n");
987 ctrl->slot_switch_type = 0; 932 ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
988 } else {
989 /* switch */
990 ctrl->slot_switch_type = 1;
991 }
992
993 /* PHP Status (0=De-feature PHP, 1=Normal operation) */
994 if (subsystem_deviceid & 0x0008) {
995 ctrl->defeature_PHP = 1; // PHP supported
996 } else {
997 ctrl->defeature_PHP = 0; // PHP not supported
998 }
999
1000 /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
1001 if (subsystem_deviceid & 0x0010) {
1002 ctrl->alternate_base_address = 1; // supported
1003 } else {
1004 ctrl->alternate_base_address = 0; // not supported
1005 }
1006
1007 /* PCI Config Space Index (0=not supported, 1=supported) */
1008 if (subsystem_deviceid & 0x0020) {
1009 ctrl->pci_config_space = 1; // supported
1010 } else {
1011 ctrl->pci_config_space = 0; // not supported
1012 }
1013
1014 /* PCI-X support */
1015 if (subsystem_deviceid & 0x0080) {
1016 /* PCI-X capable */
1017 ctrl->pcix_support = 1;
1018 /* Frequency of operation in PCI-X mode */
1019 if (subsystem_deviceid & 0x0040) {
1020 /* 133MHz PCI-X if bit 7 is 1 */
1021 ctrl->pcix_speed_capability = 1;
1022 } else {
1023 /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
1024 /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
1025 ctrl->pcix_speed_capability = 0;
1026 }
1027 } else {
1028 /* Conventional PCI */
1029 ctrl->pcix_support = 0;
1030 ctrl->pcix_speed_capability = 0;
1031 }
1032 break; 933 break;
934 }
935 if (bus_cap & 0x40) {
936 dbg("bus max supports 100MHz PCI-X\n");
937 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
938 break;
939 }
940 if (bus_cap & 20) {
941 dbg("bus max supports 66MHz PCI-X\n");
942 ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
943 break;
944 }
945 if (bus_cap & 10) {
946 dbg("bus max supports 66MHz PCI\n");
947 ctrl->speed_capability = PCI_SPEED_66MHz;
948 break;
949 }
950
951 break;
952 }
1033 953
1034 default: 954 switch (subsystem_deviceid) {
1035 err(msg_HPC_not_supported); 955 case PCI_SUB_HPC_ID:
1036 rc = -ENODEV; 956 /* Original 6500/7000 implementation */
1037 goto err_free_ctrl; 957 ctrl->slot_switch_type = 1;
958 ctrl->speed_capability = PCI_SPEED_33MHz;
959 ctrl->push_button = 0;
960 ctrl->pci_config_space = 1;
961 ctrl->defeature_PHP = 1;
962 ctrl->pcix_support = 0;
963 ctrl->pcix_speed_capability = 0;
964 break;
965 case PCI_SUB_HPC_ID2:
966 /* First Pushbutton implementation */
967 ctrl->push_flag = 1;
968 ctrl->slot_switch_type = 1;
969 ctrl->speed_capability = PCI_SPEED_33MHz;
970 ctrl->push_button = 1;
971 ctrl->pci_config_space = 1;
972 ctrl->defeature_PHP = 1;
973 ctrl->pcix_support = 0;
974 ctrl->pcix_speed_capability = 0;
975 break;
976 case PCI_SUB_HPC_ID_INTC:
977 /* Third party (6500/7000) */
978 ctrl->slot_switch_type = 1;
979 ctrl->speed_capability = PCI_SPEED_33MHz;
980 ctrl->push_button = 0;
981 ctrl->pci_config_space = 1;
982 ctrl->defeature_PHP = 1;
983 ctrl->pcix_support = 0;
984 ctrl->pcix_speed_capability = 0;
985 break;
986 case PCI_SUB_HPC_ID3:
987 /* First 66 Mhz implementation */
988 ctrl->push_flag = 1;
989 ctrl->slot_switch_type = 1;
990 ctrl->speed_capability = PCI_SPEED_66MHz;
991 ctrl->push_button = 1;
992 ctrl->pci_config_space = 1;
993 ctrl->defeature_PHP = 1;
994 ctrl->pcix_support = 0;
995 ctrl->pcix_speed_capability = 0;
996 break;
997 case PCI_SUB_HPC_ID4:
998 /* First PCI-X implementation, 100MHz */
999 ctrl->push_flag = 1;
1000 ctrl->slot_switch_type = 1;
1001 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
1002 ctrl->push_button = 1;
1003 ctrl->pci_config_space = 1;
1004 ctrl->defeature_PHP = 1;
1005 ctrl->pcix_support = 1;
1006 ctrl->pcix_speed_capability = 0;
1007 break;
1008 default:
1009 err(msg_HPC_not_supported);
1010 rc = -ENODEV;
1011 goto err_free_ctrl;
1038 } 1012 }
1013 break;
1014
1015 case PCI_VENDOR_ID_INTEL:
1016 /* Check for speed capability (0=33, 1=66) */
1017 if (subsystem_deviceid & 0x0001)
1018 ctrl->speed_capability = PCI_SPEED_66MHz;
1019 else
1020 ctrl->speed_capability = PCI_SPEED_33MHz;
1021
1022 /* Check for push button */
1023 if (subsystem_deviceid & 0x0002)
1024 ctrl->push_button = 0;
1025 else
1026 ctrl->push_button = 1;
1027
1028 /* Check for slot switch type (0=mechanical, 1=not mechanical) */
1029 if (subsystem_deviceid & 0x0004)
1030 ctrl->slot_switch_type = 0;
1031 else
1032 ctrl->slot_switch_type = 1;
1033
1034 /* PHP Status (0=De-feature PHP, 1=Normal operation) */
1035 if (subsystem_deviceid & 0x0008)
1036 ctrl->defeature_PHP = 1; /* PHP supported */
1037 else
1038 ctrl->defeature_PHP = 0; /* PHP not supported */
1039
1040 /* Alternate Base Address Register Interface
1041 * (0=not supported, 1=supported)
1042 */
1043 if (subsystem_deviceid & 0x0010)
1044 ctrl->alternate_base_address = 1;
1045 else
1046 ctrl->alternate_base_address = 0;
1047
1048 /* PCI Config Space Index (0=not supported, 1=supported) */
1049 if (subsystem_deviceid & 0x0020)
1050 ctrl->pci_config_space = 1;
1051 else
1052 ctrl->pci_config_space = 0;
1053
1054 /* PCI-X support */
1055 if (subsystem_deviceid & 0x0080) {
1056 ctrl->pcix_support = 1;
1057 if (subsystem_deviceid & 0x0040)
1058 /* 133MHz PCI-X if bit 7 is 1 */
1059 ctrl->pcix_speed_capability = 1;
1060 else
1061 /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
1062 /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
1063 ctrl->pcix_speed_capability = 0;
1064 } else {
1065 /* Conventional PCI */
1066 ctrl->pcix_support = 0;
1067 ctrl->pcix_speed_capability = 0;
1068 }
1069 break;
1039 1070
1040 } else { 1071 default:
1041 err(msg_HPC_not_supported); 1072 err(msg_HPC_not_supported);
1042 return -ENODEV; 1073 rc = -ENODEV;
1074 goto err_free_ctrl;
1043 } 1075 }
1044 1076
1045 // Tell the user that we found one. 1077 /* Tell the user that we found one. */
1046 info("Initializing the PCI hot plug controller residing on PCI bus %d\n", 1078 info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
1047 pdev->bus->number); 1079 pdev->bus->number);
1048 1080
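The Intel branch above treats subsystem_deviceid as a capability bitfield; the macro names below are invented for readability (the driver tests raw masks), with the semantics read off the tests themselves. As an aside, the Compaq branch's bus_cap & 20 and bus_cap & 10 use decimal masks where 0x20/0x10 would match the pattern of the other tests; the move preserves the original values either way.

	/* invented names; semantics taken from the tests above */
	#define SSID_66MHZ_CAPABLE	0x0001	/* 0 = 33 MHz, 1 = 66 MHz */
	#define SSID_NO_PUSH_BUTTON	0x0002
	#define SSID_NO_SLOT_SWITCH	0x0004
	#define SSID_PHP_ENABLED	0x0008	/* 0 = de-featured PHP */
	#define SSID_ALT_BAR_IFACE	0x0010
	#define SSID_CFG_SPACE_INDEX	0x0020
	#define SSID_PCIX_133MHZ	0x0040	/* meaningful only with bit 7 */
	#define SSID_PCIX_CAPABLE	0x0080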
@@ -1087,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1087 if (rc) { 1119 if (rc) {
1088 goto err_free_bus; 1120 goto err_free_bus;
1089 } 1121 }
1090 1122
1091 dbg("pdev = %p\n", pdev); 1123 dbg("pdev = %p\n", pdev);
1092 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0)); 1124 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
1093 dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0)); 1125 dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1109 goto err_free_mem_region; 1141 goto err_free_mem_region;
1110 } 1142 }
1111 1143
1112 // Check for 66Mhz operation 1144 /* Check for 66Mhz operation */
1113 ctrl->speed = get_controller_speed(ctrl); 1145 ctrl->speed = get_controller_speed(ctrl);
1114 1146
1115 1147
@@ -1120,7 +1152,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1120 * 1152 *
1121 ********************************************************/ 1153 ********************************************************/
1122 1154
1123 // find the physical slot number of the first hot plug slot 1155 /* find the physical slot number of the first hot plug slot */
1124 1156
1125 /* Get slot won't work for devices behind bridges, but 1157 /* Get slot won't work for devices behind bridges, but
1126 * in this case it will always be called for the "base" 1158 * in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1137 goto err_iounmap; 1169 goto err_iounmap;
1138 } 1170 }
1139 1171
1140 // Store PCI Config Space for all devices on this bus 1172 /* Store PCI Config Space for all devices on this bus */
1141 rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK)); 1173 rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
1142 if (rc) { 1174 if (rc) {
1143 err("%s: unable to save PCI configuration data, error %d\n", 1175 err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1148 /* 1180 /*
1149 * Get IO, memory, and IRQ resources for new devices 1181 * Get IO, memory, and IRQ resources for new devices
1150 */ 1182 */
1151 // The next line is required for cpqhp_find_available_resources 1183 /* The next line is required for cpqhp_find_available_resources */
1152 ctrl->interrupt = pdev->irq; 1184 ctrl->interrupt = pdev->irq;
1153 if (ctrl->interrupt < 0x10) { 1185 if (ctrl->interrupt < 0x10) {
1154 cpqhp_legacy_mode = 1; 1186 cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1182 __func__, rc); 1214 __func__, rc);
1183 goto err_iounmap; 1215 goto err_iounmap;
1184 } 1216 }
1185 1217
1186 /* Mask all general input interrupts */ 1218 /* Mask all general input interrupts */
1187 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK); 1219 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
1188 1220
@@ -1196,12 +1228,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1196 goto err_iounmap; 1228 goto err_iounmap;
1197 } 1229 }
1198 1230
1199 /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */ 1231 /* Enable Shift Out interrupt and clear it, also enable SERR on power
1232 * fault
1233 */
1200 temp_word = readw(ctrl->hpc_reg + MISC); 1234 temp_word = readw(ctrl->hpc_reg + MISC);
1201 temp_word |= 0x4006; 1235 temp_word |= 0x4006;
1202 writew(temp_word, ctrl->hpc_reg + MISC); 1236 writew(temp_word, ctrl->hpc_reg + MISC);
1203 1237
1204 // Changed 05/05/97 to clear all interrupts at start 1238 /* Changed 05/05/97 to clear all interrupts at start */
1205 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR); 1239 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
1206 1240
1207 ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 1241 ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1216 cpqhp_ctrl_list = ctrl; 1250 cpqhp_ctrl_list = ctrl;
1217 } 1251 }
1218 1252
1219 // turn off empty slots here unless command line option "ON" set 1253 /* turn off empty slots here unless command line option "ON" set
1220 // Wait for exclusive access to hardware 1254 * Wait for exclusive access to hardware
1255 */
1221 mutex_lock(&ctrl->crit_sect); 1256 mutex_lock(&ctrl->crit_sect);
1222 1257
1223 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; 1258 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
1224 1259
1225 // find first device number for the ctrl 1260 /* find first device number for the ctrl */
1226 device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4; 1261 device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
1227 1262
1228 while (num_of_slots) { 1263 while (num_of_slots) {
@@ -1234,23 +1269,21 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1234 hp_slot = func->device - ctrl->slot_device_offset; 1269 hp_slot = func->device - ctrl->slot_device_offset;
1235 dbg("hp_slot: %d\n", hp_slot); 1270 dbg("hp_slot: %d\n", hp_slot);
1236 1271
1237 // We have to save the presence info for these slots 1272 /* We have to save the presence info for these slots */
1238 temp_word = ctrl->ctrl_int_comp >> 16; 1273 temp_word = ctrl->ctrl_int_comp >> 16;
1239 func->presence_save = (temp_word >> hp_slot) & 0x01; 1274 func->presence_save = (temp_word >> hp_slot) & 0x01;
1240 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 1275 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
1241 1276
1242 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { 1277 if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
1243 func->switch_save = 0; 1278 func->switch_save = 0;
1244 } else { 1279 else
1245 func->switch_save = 0x10; 1280 func->switch_save = 0x10;
1246 }
1247 1281
1248 if (!power_mode) { 1282 if (!power_mode)
1249 if (!func->is_a_board) { 1283 if (!func->is_a_board) {
1250 green_LED_off(ctrl, hp_slot); 1284 green_LED_off(ctrl, hp_slot);
1251 slot_disable(ctrl, hp_slot); 1285 slot_disable(ctrl, hp_slot);
1252 } 1286 }
1253 }
1254 1287
1255 device++; 1288 device++;
1256 num_of_slots--; 1289 num_of_slots--;
@@ -1258,7 +1291,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1258 1291
1259 if (!power_mode) { 1292 if (!power_mode) {
1260 set_SOGO(ctrl); 1293 set_SOGO(ctrl);
1261 // Wait for SOBS to be unset 1294 /* Wait for SOBS to be unset */
1262 wait_for_ctrl_irq(ctrl); 1295 wait_for_ctrl_irq(ctrl);
1263 } 1296 }
1264 1297
@@ -1269,7 +1302,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1269 goto err_free_irq; 1302 goto err_free_irq;
1270 } 1303 }
1271 1304
1272 // Done with exclusive hardware access 1305 /* Done with exclusive hardware access */
1273 mutex_unlock(&ctrl->crit_sect); 1306 mutex_unlock(&ctrl->crit_sect);
1274 1307
1275 cpqhp_create_debugfs_files(ctrl); 1308 cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@ err_disable_device:
1291 return rc; 1324 return rc;
1292} 1325}
1293 1326
1294
1295static int one_time_init(void)
1296{
1297 int loop;
1298 int retval = 0;
1299
1300 if (initialized)
1301 return 0;
1302
1303 power_mode = 0;
1304
1305 retval = pci_print_IRQ_route();
1306 if (retval)
1307 goto error;
1308
1309 dbg("Initialize + Start the notification mechanism \n");
1310
1311 retval = cpqhp_event_start_thread();
1312 if (retval)
1313 goto error;
1314
1315 dbg("Initialize slot lists\n");
1316 for (loop = 0; loop < 256; loop++) {
1317 cpqhp_slot_list[loop] = NULL;
1318 }
1319
1320 // FIXME: We also need to hook the NMI handler eventually.
1321 // this also needs to be worked with Christoph
1322 // register_NMI_handler();
1323
1324 // Map rom address
1325 cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
1326 if (!cpqhp_rom_start) {
1327 err ("Could not ioremap memory region for ROM\n");
1328 retval = -EIO;
1329 goto error;
1330 }
1331
1332 /* Now, map the int15 entry point if we are on compaq specific hardware */
1333 compaq_nvram_init(cpqhp_rom_start);
1334
1335 /* Map smbios table entry point structure */
1336 smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
1337 cpqhp_rom_start + ROM_PHY_LEN);
1338 if (!smbios_table) {
1339 err ("Could not find the SMBIOS pointer in memory\n");
1340 retval = -EIO;
1341 goto error_rom_start;
1342 }
1343
1344 smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
1345 readw(smbios_table + ST_LENGTH));
1346 if (!smbios_start) {
1347 err ("Could not ioremap memory region taken from SMBIOS values\n");
1348 retval = -EIO;
1349 goto error_smbios_start;
1350 }
1351
1352 initialized = 1;
1353
1354 return retval;
1355
1356error_smbios_start:
1357 iounmap(smbios_start);
1358error_rom_start:
1359 iounmap(cpqhp_rom_start);
1360error:
1361 return retval;
1362}
1363
1364
1365static void __exit unload_cpqphpd(void) 1327static void __exit unload_cpqphpd(void)
1366{ 1328{
1367 struct pci_func *next; 1329 struct pci_func *next;
@@ -1381,10 +1343,10 @@ static void __exit unload_cpqphpd(void)
1381 if (ctrl->hpc_reg) { 1343 if (ctrl->hpc_reg) {
1382 u16 misc; 1344 u16 misc;
1383 rc = read_slot_enable (ctrl); 1345 rc = read_slot_enable (ctrl);
1384 1346
1385 writeb(0, ctrl->hpc_reg + SLOT_SERR); 1347 writeb(0, ctrl->hpc_reg + SLOT_SERR);
1386 writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK); 1348 writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
1387 1349
1388 misc = readw(ctrl->hpc_reg + MISC); 1350 misc = readw(ctrl->hpc_reg + MISC);
1389 misc &= 0xFFFD; 1351 misc &= 0xFFFD;
1390 writew(misc, ctrl->hpc_reg + MISC); 1352 writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@ static void __exit unload_cpqphpd(void)
1464 } 1426 }
1465 } 1427 }
1466 1428
1467 // Stop the notification mechanism 1429 /* Stop the notification mechanism */
1468 if (initialized) 1430 if (initialized)
1469 cpqhp_event_stop_thread(); 1431 cpqhp_event_stop_thread();
1470 1432
1471 //unmap the rom address 1433 /* unmap the rom address */
1472 if (cpqhp_rom_start) 1434 if (cpqhp_rom_start)
1473 iounmap(cpqhp_rom_start); 1435 iounmap(cpqhp_rom_start);
1474 if (smbios_start) 1436 if (smbios_start)
1475 iounmap(smbios_start); 1437 iounmap(smbios_start);
1476} 1438}
1477 1439
1478
1479
1480static struct pci_device_id hpcd_pci_tbl[] = { 1440static struct pci_device_id hpcd_pci_tbl[] = {
1481 { 1441 {
1482 /* handle any PCI Hotplug controller */ 1442 /* handle any PCI Hotplug controller */
1483 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), 1443 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
1484 .class_mask = ~0, 1444 .class_mask = ~0,
1485 1445
1486 /* no matter who makes it */ 1446 /* no matter who makes it */
1487 .vendor = PCI_ANY_ID, 1447 .vendor = PCI_ANY_ID,
1488 .device = PCI_ANY_ID, 1448 .device = PCI_ANY_ID,
1489 .subvendor = PCI_ANY_ID, 1449 .subvendor = PCI_ANY_ID,
1490 .subdevice = PCI_ANY_ID, 1450 .subdevice = PCI_ANY_ID,
1491 1451
1492 }, { /* end: all zeroes */ } 1452 }, { /* end: all zeroes */ }
1493}; 1453};
1494 1454
1495MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl); 1455MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
1496 1456
1497
1498
1499static struct pci_driver cpqhpc_driver = { 1457static struct pci_driver cpqhpc_driver = {
1500 .name = "compaq_pci_hotplug", 1458 .name = "compaq_pci_hotplug",
1501 .id_table = hpcd_pci_tbl, 1459 .id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@ static struct pci_driver cpqhpc_driver = {
1503 /* remove: cpqhpc_remove_one, */ 1461 /* remove: cpqhpc_remove_one, */
1504}; 1462};
1505 1463
1506
1507
1508static int __init cpqhpc_init(void) 1464static int __init cpqhpc_init(void)
1509{ 1465{
1510 int result; 1466 int result;
@@ -1518,7 +1474,6 @@ static int __init cpqhpc_init(void)
1518 return result; 1474 return result;
1519} 1475}
1520 1476
1521
1522static void __exit cpqhpc_cleanup(void) 1477static void __exit cpqhpc_cleanup(void)
1523{ 1478{
1524 dbg("unload_cpqphpd()\n"); 1479 dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@ static void __exit cpqhpc_cleanup(void)
1529 cpqhp_shutdown_debugfs(); 1484 cpqhp_shutdown_debugfs();
1530} 1485}
1531 1486
1532
1533module_init(cpqhpc_init); 1487module_init(cpqhpc_init);
1534module_exit(cpqhpc_cleanup); 1488module_exit(cpqhpc_cleanup);
1535
1536
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8c4b11..2fa47af992a8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
81 81
82 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 82 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
83 if (change & (0x1L << hp_slot)) { 83 if (change & (0x1L << hp_slot)) {
84 /********************************** 84 /*
85 * this one changed. 85 * this one changed.
86 **********************************/ 86 */
87 func = cpqhp_slot_find(ctrl->bus, 87 func = cpqhp_slot_find(ctrl->bus,
88 (hp_slot + ctrl->slot_device_offset), 0); 88 (hp_slot + ctrl->slot_device_offset), 0);
89 89
90 /* this is the structure that tells the worker thread 90 /* this is the structure that tells the worker thread
91 *what to do */ 91 * what to do
92 */
92 taskInfo = &(ctrl->event_queue[ctrl->next_event]); 93 taskInfo = &(ctrl->event_queue[ctrl->next_event]);
93 ctrl->next_event = (ctrl->next_event + 1) % 10; 94 ctrl->next_event = (ctrl->next_event + 1) % 10;
94 taskInfo->hp_slot = hp_slot; 95 taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
100 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 101 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
101 102
102 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { 103 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
103 /********************************** 104 /*
104 * Switch opened 105 * Switch opened
105 **********************************/ 106 */
106 107
107 func->switch_save = 0; 108 func->switch_save = 0;
108 109
109 taskInfo->event_type = INT_SWITCH_OPEN; 110 taskInfo->event_type = INT_SWITCH_OPEN;
110 } else { 111 } else {
111 /********************************** 112 /*
112 * Switch closed 113 * Switch closed
113 **********************************/ 114 */
114 115
115 func->switch_save = 0x10; 116 func->switch_save = 0x10;
116 117
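taskInfo above points into a small ring buffer on the controller; the interrupt-change handlers are the producers and the event thread is the consumer. A sketch of the pieces involved, with types inferred from usage:

	struct event_info {
		u32 event_type;		/* INT_SWITCH_OPEN, INT_BUTTON_PRESS, ... */
		u8  hp_slot;
	};

	/* inside struct controller (sketch): */
	struct event_info event_queue[10];
	u8 next_event;			/* advanced modulo 10 by the producers */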
@@ -131,9 +132,8 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
131{ 132{
132 struct slot *slot = ctrl->slot; 133 struct slot *slot = ctrl->slot;
133 134
134 while (slot && (slot->device != device)) { 135 while (slot && (slot->device != device))
135 slot = slot->next; 136 slot = slot->next;
136 }
137 137
138 return slot; 138 return slot;
139} 139}
@@ -152,17 +152,17 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
152 if (!change) 152 if (!change)
153 return 0; 153 return 0;
154 154
155 /********************************** 155 /*
156 * Presence Change 156 * Presence Change
157 **********************************/ 157 */
158 dbg("cpqsbd: Presence/Notify input change.\n"); 158 dbg("cpqsbd: Presence/Notify input change.\n");
159 dbg(" Changed bits are 0x%4.4x\n", change ); 159 dbg(" Changed bits are 0x%4.4x\n", change );
160 160
161 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 161 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
162 if (change & (0x0101 << hp_slot)) { 162 if (change & (0x0101 << hp_slot)) {
163 /********************************** 163 /*
164 * this one changed. 164 * this one changed.
165 **********************************/ 165 */
166 func = cpqhp_slot_find(ctrl->bus, 166 func = cpqhp_slot_find(ctrl->bus,
167 (hp_slot + ctrl->slot_device_offset), 0); 167 (hp_slot + ctrl->slot_device_offset), 0);
168 168
@@ -177,22 +177,23 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
177 return 0; 177 return 0;
178 178
179 /* If the switch closed, must be a button 179 /* If the switch closed, must be a button
180 * If not in button mode, nevermind */ 180 * If not in button mode, nevermind
181 */
181 if (func->switch_save && (ctrl->push_button == 1)) { 182 if (func->switch_save && (ctrl->push_button == 1)) {
182 temp_word = ctrl->ctrl_int_comp >> 16; 183 temp_word = ctrl->ctrl_int_comp >> 16;
183 temp_byte = (temp_word >> hp_slot) & 0x01; 184 temp_byte = (temp_word >> hp_slot) & 0x01;
184 temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; 185 temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
185 186
186 if (temp_byte != func->presence_save) { 187 if (temp_byte != func->presence_save) {
187 /************************************** 188 /*
188 * button Pressed (doesn't do anything) 189 * button Pressed (doesn't do anything)
189 **************************************/ 190 */
190 dbg("hp_slot %d button pressed\n", hp_slot); 191 dbg("hp_slot %d button pressed\n", hp_slot);
191 taskInfo->event_type = INT_BUTTON_PRESS; 192 taskInfo->event_type = INT_BUTTON_PRESS;
192 } else { 193 } else {
193 /********************************** 194 /*
194 * button Released - TAKE ACTION!!!! 195 * button Released - TAKE ACTION!!!!
195 **********************************/ 196 */
196 dbg("hp_slot %d button released\n", hp_slot); 197 dbg("hp_slot %d button released\n", hp_slot);
197 taskInfo->event_type = INT_BUTTON_RELEASE; 198 taskInfo->event_type = INT_BUTTON_RELEASE;
198 199
@@ -210,7 +211,8 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
210 } 211 }
211 } else { 212 } else {
212 /* Switch is open, assume a presence change 213 /* Switch is open, assume a presence change
213 * Save the presence state */ 214 * Save the presence state
215 */
214 temp_word = ctrl->ctrl_int_comp >> 16; 216 temp_word = ctrl->ctrl_int_comp >> 16;
215 func->presence_save = (temp_word >> hp_slot) & 0x01; 217 func->presence_save = (temp_word >> hp_slot) & 0x01;
216 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 218 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
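The two presence inputs for each slot sit in the upper half of ctrl_int_comp, seven bits apart, and the code above folds them into bits 0-1 of presence_save. The same packing expressed as a standalone helper (the helper name is invented):

	static inline u8 cpq_pack_presence(u32 ctrl_int_comp, u8 hp_slot)
	{
		u16 w = ctrl_int_comp >> 16;

		return ((w >> hp_slot) & 0x01) | ((w >> (hp_slot + 7)) & 0x02);
	}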
@@ -241,17 +243,17 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
241 if (!change) 243 if (!change)
242 return 0; 244 return 0;
243 245
244 /********************************** 246 /*
245 * power fault 247 * power fault
246 **********************************/ 248 */
247 249
248 info("power fault interrupt\n"); 250 info("power fault interrupt\n");
249 251
250 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 252 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
251 if (change & (0x01 << hp_slot)) { 253 if (change & (0x01 << hp_slot)) {
252 /********************************** 254 /*
253 * this one changed. 255 * this one changed.
254 **********************************/ 256 */
255 func = cpqhp_slot_find(ctrl->bus, 257 func = cpqhp_slot_find(ctrl->bus,
256 (hp_slot + ctrl->slot_device_offset), 0); 258 (hp_slot + ctrl->slot_device_offset), 0);
257 259
@@ -262,16 +264,16 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
262 rc++; 264 rc++;
263 265
264 if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { 266 if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
265 /********************************** 267 /*
266 * power fault Cleared 268 * power fault Cleared
267 **********************************/ 269 */
268 func->status = 0x00; 270 func->status = 0x00;
269 271
270 taskInfo->event_type = INT_POWER_FAULT_CLEAR; 272 taskInfo->event_type = INT_POWER_FAULT_CLEAR;
271 } else { 273 } else {
272 /********************************** 274 /*
273 * power fault 275 * power fault
274 **********************************/ 276 */
275 taskInfo->event_type = INT_POWER_FAULT; 277 taskInfo->event_type = INT_POWER_FAULT;
276 278
277 if (ctrl->rev < 4) { 279 if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **h
432 434
433 435
434 /* If we got here, the bridge requires some of the resource, but 436 /* If we got here, the bridge requires some of the resource, but
435 * we may be able to split some off of the front */ 437 * we may be able to split some off of the front
438 */
436 439
437 node = *head; 440 node = *head;
438 441
439 if (node->length & (alignment -1)) { 442 if (node->length & (alignment -1)) {
440 /* this one isn't an aligned length, so we'll make a new entry 443 /* this one isn't an aligned length, so we'll make a new entry
441 * and split it up. */ 444 * and split it up.
445 */
442 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 446 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
443 447
444 if (!split_node) 448 if (!split_node)
@@ -544,10 +548,10 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
544 if (!(*head)) 548 if (!(*head))
545 return NULL; 549 return NULL;
546 550
547 if ( cpqhp_resource_sort_and_combine(head) ) 551 if (cpqhp_resource_sort_and_combine(head))
548 return NULL; 552 return NULL;
549 553
550 if ( sort_by_size(head) ) 554 if (sort_by_size(head))
551 return NULL; 555 return NULL;
552 556
553 for (node = *head; node; node = node->next) { 557 for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
556 560
557 if (node->base & (size - 1)) { 561 if (node->base & (size - 1)) {
558 /* this one isn't base aligned properly 562 /* this one isn't base aligned properly
559 * so we'll make a new entry and split it up */ 563 * so we'll make a new entry and split it up
564 */
560 temp_dword = (node->base | (size-1)) + 1; 565 temp_dword = (node->base | (size-1)) + 1;
561 566
562 /* Short circuit if adjusted size is too small */ 567 /* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
581 /* Don't need to check if too small since we already did */ 586 /* Don't need to check if too small since we already did */
582 if (node->length > size) { 587 if (node->length > size) {
583 /* this one is longer than we need 588 /* this one is longer than we need
584 * so we'll make a new entry and split it up */ 589 * so we'll make a new entry and split it up
590 */
585 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 591 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
586 592
587 if (!split_node) 593 if (!split_node)
@@ -601,7 +607,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
601 continue; 607 continue;
602 608
603 /* If we got here, then it is the right size 609 /* If we got here, then it is the right size
604 * Now take it out of the list and break */ 610 * Now take it out of the list and break
611 */
605 if (*head == node) { 612 if (*head == node) {
606 *head = node->next; 613 *head = node->next;
607 } else { 614 } else {
@@ -642,14 +649,16 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
642 return NULL; 649 return NULL;
643 650
644 for (max = *head; max; max = max->next) { 651 for (max = *head; max; max = max->next) {
645 /* If not big enough we could probably just bail, 652 /* If not big enough we could probably just bail,
646 * instead we'll continue to the next. */ 653 * instead we'll continue to the next.
654 */
647 if (max->length < size) 655 if (max->length < size)
648 continue; 656 continue;
649 657
650 if (max->base & (size - 1)) { 658 if (max->base & (size - 1)) {
651 /* this one isn't base aligned properly 659 /* this one isn't base aligned properly
652 * so we'll make a new entry and split it up */ 660 * so we'll make a new entry and split it up
661 */
653 temp_dword = (max->base | (size-1)) + 1; 662 temp_dword = (max->base | (size-1)) + 1;
654 663
655 /* Short circuit if adjusted size is too small */ 664 /* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
672 681
673 if ((max->base + max->length) & (size - 1)) { 682 if ((max->base + max->length) & (size - 1)) {
674 /* this one isn't end aligned properly at the top 683 /* this one isn't end aligned properly at the top
675 * so we'll make a new entry and split it up */ 684 * so we'll make a new entry and split it up
685 */
676 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 686 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
677 687
678 if (!split_node) 688 if (!split_node)
@@ -744,7 +754,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
744 if (node->base & (size - 1)) { 754 if (node->base & (size - 1)) {
745 dbg("%s: not aligned\n", __func__); 755 dbg("%s: not aligned\n", __func__);
746 /* this one isn't base aligned properly 756 /* this one isn't base aligned properly
747 * so we'll make a new entry and split it up */ 757 * so we'll make a new entry and split it up
758 */
748 temp_dword = (node->base | (size-1)) + 1; 759 temp_dword = (node->base | (size-1)) + 1;
749 760
750 /* Short circuit if adjusted size is too small */ 761 /* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
769 if (node->length > size) { 780 if (node->length > size) {
770 dbg("%s: too big\n", __func__); 781 dbg("%s: too big\n", __func__);
771 /* this one is longer than we need 782 /* this one is longer than we need
772 * so we'll make a new entry and split it up */ 783 * so we'll make a new entry and split it up
784 */
773 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 785 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
774 786
775 if (!split_node) 787 if (!split_node)
@@ -886,19 +898,19 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
886 u32 Diff; 898 u32 Diff;
887 u32 temp_dword; 899 u32 temp_dword;
888 900
889 901
890 misc = readw(ctrl->hpc_reg + MISC); 902 misc = readw(ctrl->hpc_reg + MISC);
891 /*************************************** 903 /*
892 * Check to see if it was our interrupt 904 * Check to see if it was our interrupt
893 ***************************************/ 905 */
894 if (!(misc & 0x000C)) { 906 if (!(misc & 0x000C)) {
895 return IRQ_NONE; 907 return IRQ_NONE;
896 } 908 }
897 909
898 if (misc & 0x0004) { 910 if (misc & 0x0004) {
899 /********************************** 911 /*
900 * Serial Output interrupt Pending 912 * Serial Output interrupt Pending
901 **********************************/ 913 */
902 914
903 /* Clear the interrupt */ 915 /* Clear the interrupt */
904 misc |= 0x0004; 916 misc |= 0x0004;
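cpqhp_ctrl_intr above follows the usual shared-interrupt discipline: read the controller's status register, and if none of the bits this driver owns are set, return IRQ_NONE so other handlers sharing the line get a chance. A minimal sketch of that pattern; MY_STATUS and MY_IRQ_BITS are hypothetical stand-ins for MISC and 0x000C:

    #define MY_STATUS   0x08    /* hypothetical register offset */
    #define MY_IRQ_BITS 0x000C  /* the bits this handler owns */

    struct my_ctrl { void __iomem *regs; };

    static irqreturn_t my_isr(int irq, void *data)
    {
            struct my_ctrl *ctrl = data;
            u16 status = readw(ctrl->regs + MY_STATUS);

            /* None of our bits set: not our interrupt. */
            if (!(status & MY_IRQ_BITS))
                    return IRQ_NONE;

            /* Acknowledge by writing the bits back, as the code above does. */
            writew(status & MY_IRQ_BITS, ctrl->regs + MY_STATUS);
            return IRQ_HANDLED;
    }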
@@ -961,11 +973,8 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
961 struct pci_func *next; 973 struct pci_func *next;
962 974
963 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); 975 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
964 if (new_slot == NULL) { 976 if (new_slot == NULL)
965 /* I'm not dead yet!
966 * You will be. */
967 return new_slot; 977 return new_slot;
968 }
969 978
970 new_slot->next = NULL; 979 new_slot->next = NULL;
971 new_slot->configured = 1; 980 new_slot->configured = 1;
@@ -996,10 +1005,8 @@ static int slot_remove(struct pci_func * old_slot)
996 return 1; 1005 return 1;
997 1006
998 next = cpqhp_slot_list[old_slot->bus]; 1007 next = cpqhp_slot_list[old_slot->bus];
999 1008 if (next == NULL)
1000 if (next == NULL) {
1001 return 1; 1009 return 1;
1002 }
1003 1010
1004 if (next == old_slot) { 1011 if (next == old_slot) {
1005 cpqhp_slot_list[old_slot->bus] = old_slot->next; 1012 cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@ static int slot_remove(struct pci_func * old_slot)
1008 return 0; 1015 return 0;
1009 } 1016 }
1010 1017
1011 while ((next->next != old_slot) && (next->next != NULL)) { 1018 while ((next->next != old_slot) && (next->next != NULL))
1012 next = next->next; 1019 next = next->next;
1013 }
1014 1020
1015 if (next->next == old_slot) { 1021 if (next->next == old_slot) {
1016 next->next = old_slot->next; 1022 next->next = old_slot->next;
@@ -1040,9 +1046,8 @@ static int bridge_slot_remove(struct pci_func *bridge)
1040 for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { 1046 for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
1041 next = cpqhp_slot_list[tempBus]; 1047 next = cpqhp_slot_list[tempBus];
1042 1048
1043 while (!slot_remove(next)) { 1049 while (!slot_remove(next))
1044 next = cpqhp_slot_list[tempBus]; 1050 next = cpqhp_slot_list[tempBus];
1045 }
1046 } 1051 }
1047 1052
1048 next = cpqhp_slot_list[bridge->bus]; 1053 next = cpqhp_slot_list[bridge->bus];
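slot_remove and bridge_slot_remove above walk cpqhp_slot_list, an array of plain singly linked lists indexed by bus number, and the brace removals do not change the logic: it is the textbook unlink, with the head handled specially and the predecessor spliced otherwise. The same operation on a simplified node type (not the real struct pci_func):

    struct node { struct node *next; };

    /* Returns 0 on success, 1 if old is not on the list. */
    static int list_unlink(struct node **head, struct node *old)
    {
            struct node *cur = *head;

            if (!cur)
                    return 1;
            if (cur == old) {
                    *head = old->next;
                    return 0;
            }
            while (cur->next && cur->next != old)
                    cur = cur->next;
            if (cur->next != old)
                    return 1;
            cur->next = old->next;
            return 0;
    }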
@@ -1130,39 +1135,43 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1130 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); 1135 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
1131 u16 reg16; 1136 u16 reg16;
1132 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); 1137 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
1133 1138
1134 if (ctrl->speed == adapter_speed) 1139 if (ctrl->speed == adapter_speed)
1135 return 0; 1140 return 0;
1136 1141
1137 /* We don't allow freq/mode changes if we find another adapter running 1142 /* We don't allow freq/mode changes if we find another adapter running
1138 * in another slot on this controller */ 1143 * in another slot on this controller
1144 */
1139 for(slot = ctrl->slot; slot; slot = slot->next) { 1145 for(slot = ctrl->slot; slot; slot = slot->next) {
1140 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1146 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1141 continue; 1147 continue;
1142 if (!slot->hotplug_slot || !slot->hotplug_slot->info) 1148 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
1143 continue; 1149 continue;
1144 if (slot->hotplug_slot->info->adapter_status == 0) 1150 if (slot->hotplug_slot->info->adapter_status == 0)
1145 continue; 1151 continue;
1146 /* If another adapter is running on the same segment but at a 1152 /* If another adapter is running on the same segment but at a
1147 * lower speed/mode, we allow the new adapter to function at 1153 * lower speed/mode, we allow the new adapter to function at
1148 * this rate if supported */ 1154 * this rate if supported
1149 if (ctrl->speed < adapter_speed) 1155 */
1156 if (ctrl->speed < adapter_speed)
1150 return 0; 1157 return 0;
1151 1158
1152 return 1; 1159 return 1;
1153 } 1160 }
1154 1161
1155 /* If the controller doesn't support freq/mode changes and the 1162 /* If the controller doesn't support freq/mode changes and the
1156 * controller is running at a higher mode, we bail */ 1163 * controller is running at a higher mode, we bail
1164 */
1157 if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) 1165 if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
1158 return 1; 1166 return 1;
1159 1167
1160 /* But we allow the adapter to run at a lower rate if possible */ 1168 /* But we allow the adapter to run at a lower rate if possible */
1161 if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) 1169 if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
1162 return 0; 1170 return 0;
1163 1171
1164 /* We try to set the max speed supported by both the adapter and 1172 /* We try to set the max speed supported by both the adapter and
1165 * controller */ 1173 * controller
1174 */
1166 if (ctrl->speed_capability < adapter_speed) { 1175 if (ctrl->speed_capability < adapter_speed) {
1167 if (ctrl->speed == ctrl->speed_capability) 1176 if (ctrl->speed == ctrl->speed_capability)
1168 return 0; 1177 return 0;
@@ -1171,22 +1180,22 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1171 1180
1172 writel(0x0L, ctrl->hpc_reg + LED_CONTROL); 1181 writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
1173 writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); 1182 writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
1174 1183
1175 set_SOGO(ctrl); 1184 set_SOGO(ctrl);
1176 wait_for_ctrl_irq(ctrl); 1185 wait_for_ctrl_irq(ctrl);
1177 1186
1178 if (adapter_speed != PCI_SPEED_133MHz_PCIX) 1187 if (adapter_speed != PCI_SPEED_133MHz_PCIX)
1179 reg = 0xF5; 1188 reg = 0xF5;
1180 else 1189 else
1181 reg = 0xF4; 1190 reg = 0xF4;
1182 pci_write_config_byte(ctrl->pci_dev, 0x41, reg); 1191 pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
1183 1192
1184 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); 1193 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
1185 reg16 &= ~0x000F; 1194 reg16 &= ~0x000F;
1186 switch(adapter_speed) { 1195 switch(adapter_speed) {
1187 case(PCI_SPEED_133MHz_PCIX): 1196 case(PCI_SPEED_133MHz_PCIX):
1188 reg = 0x75; 1197 reg = 0x75;
1189 reg16 |= 0xB; 1198 reg16 |= 0xB;
1190 break; 1199 break;
1191 case(PCI_SPEED_100MHz_PCIX): 1200 case(PCI_SPEED_100MHz_PCIX):
1192 reg = 0x74; 1201 reg = 0x74;
@@ -1203,48 +1212,48 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1203 default: /* 33MHz PCI 2.2 */ 1212 default: /* 33MHz PCI 2.2 */
1204 reg = 0x71; 1213 reg = 0x71;
1205 break; 1214 break;
1206 1215
1207 } 1216 }
1208 reg16 |= 0xB << 12; 1217 reg16 |= 0xB << 12;
1209 writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ); 1218 writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
1210 1219
1211 mdelay(5); 1220 mdelay(5);
1212 1221
1213 /* Reenable interrupts */ 1222 /* Reenable interrupts */
1214 writel(0, ctrl->hpc_reg + INT_MASK); 1223 writel(0, ctrl->hpc_reg + INT_MASK);
1215 1224
1216 pci_write_config_byte(ctrl->pci_dev, 0x41, reg); 1225 pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
1217 1226
1218 /* Restart state machine */ 1227 /* Restart state machine */
1219 reg = ~0xF; 1228 reg = ~0xF;
1220 pci_read_config_byte(ctrl->pci_dev, 0x43, &reg); 1229 pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
1221 pci_write_config_byte(ctrl->pci_dev, 0x43, reg); 1230 pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
1222 1231
1223 /* Only if mode change...*/ 1232 /* Only if mode change...*/
1224 if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || 1233 if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
1225 ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 1234 ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
1226 set_SOGO(ctrl); 1235 set_SOGO(ctrl);
1227 1236
1228 wait_for_ctrl_irq(ctrl); 1237 wait_for_ctrl_irq(ctrl);
1229 mdelay(1100); 1238 mdelay(1100);
1230 1239
1231 /* Restore LED/Slot state */ 1240 /* Restore LED/Slot state */
1232 writel(leds, ctrl->hpc_reg + LED_CONTROL); 1241 writel(leds, ctrl->hpc_reg + LED_CONTROL);
1233 writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE); 1242 writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
1234 1243
1235 set_SOGO(ctrl); 1244 set_SOGO(ctrl);
1236 wait_for_ctrl_irq(ctrl); 1245 wait_for_ctrl_irq(ctrl);
1237 1246
1238 ctrl->speed = adapter_speed; 1247 ctrl->speed = adapter_speed;
1239 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1248 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
1240 1249
1241 info("Successfully changed frequency/mode for adapter in slot %d\n", 1250 info("Successfully changed frequency/mode for adapter in slot %d\n",
1242 slot->number); 1251 slot->number);
1243 return 0; 1252 return 0;
1244} 1253}
1245 1254
1246/* the following routines constitute the bulk of the 1255/* the following routines constitute the bulk of the
1247 hotplug controller logic 1256 * hotplug controller logic
1248 */ 1257 */
1249 1258
1250 1259
@@ -1268,17 +1277,17 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1268 1277
1269 hp_slot = func->device - ctrl->slot_device_offset; 1278 hp_slot = func->device - ctrl->slot_device_offset;
1270 1279
1271 if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) { 1280 /*
1272 /********************************** 1281 * The switch is open.
1273 * The switch is open. 1282 */
1274 **********************************/ 1283 if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
1275 rc = INTERLOCK_OPEN; 1284 rc = INTERLOCK_OPEN;
1276 } else if (is_slot_enabled (ctrl, hp_slot)) { 1285 /*
1277 /********************************** 1286 * The board is already on
1278 * The board is already on 1287 */
1279 **********************************/ 1288 else if (is_slot_enabled (ctrl, hp_slot))
1280 rc = CARD_FUNCTIONING; 1289 rc = CARD_FUNCTIONING;
1281 } else { 1290 else {
1282 mutex_lock(&ctrl->crit_sect); 1291 mutex_lock(&ctrl->crit_sect);
1283 1292
1284 /* turn on board without attaching to the bus */ 1293 /* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1299 1308
1300 /* Wait for SOBS to be unset */ 1309 /* Wait for SOBS to be unset */
1301 wait_for_ctrl_irq (ctrl); 1310 wait_for_ctrl_irq (ctrl);
1302 1311
1303 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1312 adapter_speed = get_adapter_speed(ctrl, hp_slot);
1304 if (ctrl->speed != adapter_speed) 1313 if (ctrl->speed != adapter_speed)
1305 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1314 if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1352 * Get slot won't work for devices behind 1361 * Get slot won't work for devices behind
1353 * bridges, but in this case it will always be 1362 * bridges, but in this case it will always be
1354 * called for the "base" bus/dev/func of an 1363 * called for the "base" bus/dev/func of an
1355 * adapter. */ 1364 * adapter.
1365 */
1356 1366
1357 mutex_lock(&ctrl->crit_sect); 1367 mutex_lock(&ctrl->crit_sect);
1358 1368
@@ -1377,7 +1387,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1377 1387
1378 * Get slot won't work for devices behind bridges, but 1388 * Get slot won't work for devices behind bridges, but
1379 * in this case it will always be called for the "base" 1389 * in this case it will always be called for the "base"
1380 * bus/dev/func of an adapter. */ 1390 * bus/dev/func of an adapter.
1391 */
1381 1392
1382 mutex_lock(&ctrl->crit_sect); 1393 mutex_lock(&ctrl->crit_sect);
1383 1394
@@ -1434,7 +1445,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1434 wait_for_ctrl_irq (ctrl); 1445 wait_for_ctrl_irq (ctrl);
1435 1446
1436 /* Change bits in slot power register to force another shift out 1447 /* Change bits in slot power register to force another shift out
1437 * NOTE: this is to work around the timer bug */ 1448 * NOTE: this is to work around the timer bug
1449 */
1438 temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); 1450 temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
1439 writeb(0x00, ctrl->hpc_reg + SLOT_POWER); 1451 writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
1440 writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); 1452 writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1443 1455
1444 /* Wait for SOBS to be unset */ 1456 /* Wait for SOBS to be unset */
1445 wait_for_ctrl_irq (ctrl); 1457 wait_for_ctrl_irq (ctrl);
1446 1458
1447 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1459 adapter_speed = get_adapter_speed(ctrl, hp_slot);
1448 if (ctrl->speed != adapter_speed) 1460 if (ctrl->speed != adapter_speed)
1449 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1461 if (set_controller_speed(ctrl, adapter_speed, hp_slot))
1450 rc = WRONG_BUS_FREQUENCY; 1462 rc = WRONG_BUS_FREQUENCY;
1451 1463
1452 /* turn off board without attaching to the bus */ 1464 /* turn off board without attaching to the bus */
1453 disable_slot_power (ctrl, hp_slot); 1465 disable_slot_power (ctrl, hp_slot);
1454 1466
@@ -1461,7 +1473,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1461 1473
1462 if (rc) 1474 if (rc)
1463 return rc; 1475 return rc;
1464 1476
1465 p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1477 p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
1466 1478
1467 /* turn on board and blink green LED */ 1479 /* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1521 } 1533 }
1522 1534
1523 /* All F's is an empty slot or an invalid board */ 1535 /* All F's is an empty slot or an invalid board */
1524 if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */ 1536 if (temp_register != 0xFFFFFFFF) {
1525 res_lists.io_head = ctrl->io_head; 1537 res_lists.io_head = ctrl->io_head;
1526 res_lists.mem_head = ctrl->mem_head; 1538 res_lists.mem_head = ctrl->mem_head;
1527 res_lists.p_mem_head = ctrl->p_mem_head; 1539 res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1570 index = 0; 1582 index = 0;
1571 do { 1583 do {
1572 new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++); 1584 new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
1573 if (new_slot && !new_slot->pci_dev) { 1585 if (new_slot && !new_slot->pci_dev)
1574 cpqhp_configure_device(ctrl, new_slot); 1586 cpqhp_configure_device(ctrl, new_slot);
1575 }
1576 } while (new_slot); 1587 } while (new_slot);
1577 1588
1578 mutex_lock(&ctrl->crit_sect); 1589 mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@ static void interrupt_event_handler(struct controller *ctrl)
1859 info(msg_button_on, p_slot->number); 1870 info(msg_button_on, p_slot->number);
1860 } 1871 }
1861 mutex_lock(&ctrl->crit_sect); 1872 mutex_lock(&ctrl->crit_sect);
1862 1873
1863 dbg("blink green LED and turn off amber\n"); 1874 dbg("blink green LED and turn off amber\n");
1864 1875
1865 amber_LED_off (ctrl, hp_slot); 1876 amber_LED_off (ctrl, hp_slot);
1866 green_LED_blink (ctrl, hp_slot); 1877 green_LED_blink (ctrl, hp_slot);
1867 1878
1868 set_SOGO(ctrl); 1879 set_SOGO(ctrl);
1869 1880
1870 /* Wait for SOBS to be unset */ 1881 /* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
1958 if (cpqhp_process_SI(ctrl, func) != 0) { 1969 if (cpqhp_process_SI(ctrl, func) != 0) {
1959 amber_LED_on(ctrl, hp_slot); 1970 amber_LED_on(ctrl, hp_slot);
1960 green_LED_off(ctrl, hp_slot); 1971 green_LED_off(ctrl, hp_slot);
1961 1972
1962 set_SOGO(ctrl); 1973 set_SOGO(ctrl);
1963 1974
1964 /* Wait for SOBS to be unset */ 1975 /* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2079 struct pci_bus *pci_bus = ctrl->pci_bus; 2090 struct pci_bus *pci_bus = ctrl->pci_bus;
2080 int physical_slot=0; 2091 int physical_slot=0;
2081 2092
2082 device = func->device; 2093 device = func->device;
2083 func = cpqhp_slot_find(ctrl->bus, device, index++); 2094 func = cpqhp_slot_find(ctrl->bus, device, index++);
2084 p_slot = cpqhp_find_slot(ctrl, device); 2095 p_slot = cpqhp_find_slot(ctrl, device);
2085 if (p_slot) { 2096 if (p_slot) {
@@ -2113,9 +2124,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2113 2124
2114 /* If the VGA Enable bit is set, remove isn't 2125 /* If the VGA Enable bit is set, remove isn't
2115 * supported */ 2126 * supported */
2116 if (BCR & PCI_BRIDGE_CTL_VGA) { 2127 if (BCR & PCI_BRIDGE_CTL_VGA)
2117 rc = REMOVE_NOT_SUPPORTED; 2128 rc = REMOVE_NOT_SUPPORTED;
2118 }
2119 } 2129 }
2120 } 2130 }
2121 2131
@@ -2183,67 +2193,67 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
2183 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f; 2193 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
2184 2194
2185 switch (test_num) { 2195 switch (test_num) {
2186 case 1: 2196 case 1:
2187 /* Do stuff here! */ 2197 /* Do stuff here! */
2188 2198
2189 /* Do that funky LED thing */ 2199 /* Do that funky LED thing */
2190 /* so we can restore them later */ 2200 /* so we can restore them later */
2191 save_LED = readl(ctrl->hpc_reg + LED_CONTROL); 2201 save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
2192 work_LED = 0x01010101; 2202 work_LED = 0x01010101;
2193 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2203 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2194 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2204 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2195 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2205 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2196 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2206 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2197 2207
2198 work_LED = 0x01010000; 2208 work_LED = 0x01010000;
2199 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2209 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2200 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2210 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2201 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2211 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2202 work_LED = 0x00000101; 2212 work_LED = 0x00000101;
2203 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2213 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2204 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2214 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2205 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2215 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2216
2217 work_LED = 0x01010000;
2218 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2219 for (loop = 0; loop < num_of_slots; loop++) {
2220 set_SOGO(ctrl);
2206 2221
2207 work_LED = 0x01010000; 2222 /* Wait for SOGO interrupt */
2208 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2223 wait_for_ctrl_irq (ctrl);
2209 for (loop = 0; loop < num_of_slots; loop++) {
2210 set_SOGO(ctrl);
2211 2224
2212 /* Wait for SOGO interrupt */ 2225 /* Get ready for next iteration */
2213 wait_for_ctrl_irq (ctrl); 2226 long_delay((3*HZ)/10);
2227 work_LED = work_LED >> 16;
2228 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2214 2229
2215 /* Get ready for next iteration */ 2230 set_SOGO(ctrl);
2216 long_delay((3*HZ)/10);
2217 work_LED = work_LED >> 16;
2218 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2219
2220 set_SOGO(ctrl);
2221 2231
2222 /* Wait for SOGO interrupt */ 2232 /* Wait for SOGO interrupt */
2223 wait_for_ctrl_irq (ctrl); 2233 wait_for_ctrl_irq (ctrl);
2224 2234
2225 /* Get ready for next iteration */ 2235 /* Get ready for next iteration */
2226 long_delay((3*HZ)/10); 2236 long_delay((3*HZ)/10);
2227 work_LED = work_LED << 16; 2237 work_LED = work_LED << 16;
2228 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2238 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2229 work_LED = work_LED << 1; 2239 work_LED = work_LED << 1;
2230 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2240 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2231 } 2241 }
2232 2242
2233 /* put it back the way it was */ 2243 /* put it back the way it was */
2234 writel(save_LED, ctrl->hpc_reg + LED_CONTROL); 2244 writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
2235 2245
2236 set_SOGO(ctrl); 2246 set_SOGO(ctrl);
2237 2247
2238 /* Wait for SOBS to be unset */ 2248 /* Wait for SOBS to be unset */
2239 wait_for_ctrl_irq (ctrl); 2249 wait_for_ctrl_irq (ctrl);
2240 break; 2250 break;
2241 case 2: 2251 case 2:
2242 /* Do other stuff here! */ 2252 /* Do other stuff here! */
2243 break; 2253 break;
2244 case 3: 2254 case 3:
2245 /* and more... */ 2255 /* and more... */
2246 break; 2256 break;
2247 } 2257 }
2248 return 0; 2258 return 0;
2249} 2259}
@@ -2312,9 +2322,9 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
2312 while ((function < max_functions) && (!stop_it)) { 2322 while ((function < max_functions) && (!stop_it)) {
2313 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); 2323 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
2314 2324
2315 if (ID == 0xFFFFFFFF) { /* There's nothing there. */ 2325 if (ID == 0xFFFFFFFF) {
2316 function++; 2326 function++;
2317 } else { /* There's something there */ 2327 } else {
2318 /* Setup slot structure. */ 2328 /* Setup slot structure. */
2319 new_slot = cpqhp_slot_create(func->bus); 2329 new_slot = cpqhp_slot_create(func->bus);
2320 2330
@@ -2339,8 +2349,8 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
2339 2349
2340 2350
2341/* 2351/*
2342 Configuration logic that involves the hotplug data structures and 2352 * Configuration logic that involves the hotplug data structures and
2343 their bookkeeping 2353 * their bookkeeping
2344 */ 2354 */
2345 2355
2346 2356
@@ -2393,7 +2403,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2393 if (rc) 2403 if (rc)
2394 return rc; 2404 return rc;
2395 2405
2396 if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */ 2406 if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
2397 /* set Primary bus */ 2407 /* set Primary bus */
2398 dbg("set Primary bus = %d\n", func->bus); 2408 dbg("set Primary bus = %d\n", func->bus);
2399 rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus); 2409 rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2484 temp_resources.irqs = &irqs; 2494 temp_resources.irqs = &irqs;
2485 2495
2486 /* Make copies of the nodes we are going to pass down so that 2496 /* Make copies of the nodes we are going to pass down so that
 2487 * if there is a problem, we can just use these to free resources 2497 * if there is a problem, we can just use these to free resources
2498 */
2488 hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); 2499 hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
2489 hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); 2500 hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
2490 hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); 2501 hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2556 temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; 2567 temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
2557 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); 2568 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
2558 2569
2559 /* Adjust this to compensate for extra adjustment in first loop */ 2570 /* Adjust this to compensate for extra adjustment in first loop
2571 */
2560 irqs.barber_pole--; 2572 irqs.barber_pole--;
2561 2573
2562 rc = 0; 2574 rc = 0;
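The prefetchable window write above follows the type-1 header encoding: PCI_PREF_MEMORY_BASE and PCI_PREF_MEMORY_LIMIT hold bits 31:16 of the addresses, so the limit value is (base + length - 1) >> 16. A hedged sketch of the pair of writes; the hunk shows only the limit side, and the base write is assumed to be the standard counterpart:

    static int program_pref_window(struct pci_bus *bus, unsigned int devfn,
                                   u32 base, u32 length)
    {
            int rc;

            /* Both registers hold bits 31:16 of a 32-bit address. */
            rc = pci_bus_write_config_word(bus, devfn, PCI_PREF_MEMORY_BASE,
                                           base >> 16);
            if (rc)
                    return rc;
            return pci_bus_write_config_word(bus, devfn,
                                             PCI_PREF_MEMORY_LIMIT,
                                             (base + length - 1) >> 16);
    }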
@@ -2917,27 +2929,26 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2917 } /* End of base register loop */ 2929 } /* End of base register loop */
2918 if (cpqhp_legacy_mode) { 2930 if (cpqhp_legacy_mode) {
2919 /* Figure out which interrupt pin this function uses */ 2931 /* Figure out which interrupt pin this function uses */
2920 rc = pci_bus_read_config_byte (pci_bus, devfn, 2932 rc = pci_bus_read_config_byte (pci_bus, devfn,
2921 PCI_INTERRUPT_PIN, &temp_byte); 2933 PCI_INTERRUPT_PIN, &temp_byte);
2922 2934
2923 /* If this function needs an interrupt and we are behind 2935 /* If this function needs an interrupt and we are behind
2924 * a bridge and the pin is tied to something that's 2936 * a bridge and the pin is tied to something that's
 2925 * already mapped, set this one the same */ 2937 * already mapped, set this one the same */
2926 if (temp_byte && resources->irqs && 2938 if (temp_byte && resources->irqs &&
2927 (resources->irqs->valid_INT & 2939 (resources->irqs->valid_INT &
2928 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { 2940 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
2929 /* We have to share with something already set up */ 2941 /* We have to share with something already set up */
2930 IRQ = resources->irqs->interrupt[(temp_byte + 2942 IRQ = resources->irqs->interrupt[(temp_byte +
2931 resources->irqs->barber_pole - 1) & 0x03]; 2943 resources->irqs->barber_pole - 1) & 0x03];
2932 } else { 2944 } else {
2933 /* Program IRQ based on card type */ 2945 /* Program IRQ based on card type */
2934 rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); 2946 rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
2935 2947
2936 if (class_code == PCI_BASE_CLASS_STORAGE) { 2948 if (class_code == PCI_BASE_CLASS_STORAGE)
2937 IRQ = cpqhp_disk_irq; 2949 IRQ = cpqhp_disk_irq;
2938 } else { 2950 else
2939 IRQ = cpqhp_nic_irq; 2951 IRQ = cpqhp_nic_irq;
2940 }
2941 } 2952 }
2942 2953
2943 /* IRQ Line */ 2954 /* IRQ Line */
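The index expression in that hunk is the classic interrupt-pin swizzle: pins INTA..INTD (PCI_INTERRUPT_PIN values 1..4) are rotated by the slot's barber_pole so they spread across the four router inputs, and the & 0x03 wraps modulo 4. The computation in isolation:

    #include <stdio.h>

    /* pin: PCI_INTERRUPT_PIN from config space, 1=INTA .. 4=INTD.
     * barber_pole: per-slot rotation kept by the caller. */
    static unsigned int swizzle(unsigned int pin, unsigned int barber_pole)
    {
            return (pin + barber_pole - 1) & 0x03;
    }

    int main(void)
    {
            /* INTA in four consecutive positions lands on inputs 0,1,2,3. */
            for (unsigned int slot = 0; slot < 4; slot++)
                    printf("slot %u -> input %u\n", slot, swizzle(1, slot));
            return 0;
    }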
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb174888002b..76ba8a1c774d 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@ static u8 evbuffer[1024];
94 94
95static void __iomem *compaq_int15_entry_point; 95static void __iomem *compaq_int15_entry_point;
96 96
97static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */ 97/* lock for ordering int15_bios_call() */
98static spinlock_t int15_lock;
98 99
99 100
 100/* This is a series of functions that deal with 101/* This is a series of functions that deal with
101 setting & getting the hotplug resource table in some environment variable. 102 * setting & getting the hotplug resource table in some environment variable.
102*/ 103 */
103 104
104/* 105/*
105 * We really shouldn't be doing this unless there is a _very_ good reason to!!! 106 * We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
113 114
114 if ((*used + 1) > *avail) 115 if ((*used + 1) > *avail)
115 return(1); 116 return(1);
116 117
117 *((u8*)*p_buffer) = value; 118 *((u8*)*p_buffer) = value;
118 tByte = (u8**)p_buffer; 119 tByte = (u8**)p_buffer;
119 (*tByte)++; 120 (*tByte)++;
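add_byte, and the add_dword used throughout store_HRT below, append to a moving cursor while tracking used against available bytes and return nonzero on overflow, which is why every caller checks rc and bails. A standalone sketch of the same bounds-checked append, with a simple cursor struct in place of the driver's double-pointer arithmetic:

    #include <stdint.h>
    #include <string.h>

    struct cursor { uint8_t *p; uint32_t used, avail; };

    static int put_byte(struct cursor *c, uint8_t v)
    {
            if (c->used + 1 > c->avail)
                    return 1;           /* caller bails, as store_HRT does */
            *c->p++ = v;
            c->used++;
            return 0;
    }

    static int put_dword(struct cursor *c, uint32_t v)
    {
            if (c->used + 4 > c->avail)
                    return 1;
            memcpy(c->p, &v, 4);        /* unaligned-safe copy */
            c->p += 4;
            c->used += 4;
            return 0;
    }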
@@ -170,10 +171,10 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
170 unsigned long flags; 171 unsigned long flags;
171 int op = operation; 172 int op = operation;
172 int ret_val; 173 int ret_val;
173 174
174 if (!compaq_int15_entry_point) 175 if (!compaq_int15_entry_point)
175 return -ENODEV; 176 return -ENODEV;
176 177
177 spin_lock_irqsave(&int15_lock, flags); 178 spin_lock_irqsave(&int15_lock, flags);
178 __asm__ ( 179 __asm__ (
179 "xorl %%ebx,%%ebx\n" \ 180 "xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
187 "D" (buffer), "m" (compaq_int15_entry_point) 188 "D" (buffer), "m" (compaq_int15_entry_point)
188 : "%ebx", "%edx"); 189 : "%ebx", "%edx");
189 spin_unlock_irqrestore(&int15_lock, flags); 190 spin_unlock_irqrestore(&int15_lock, flags);
190 191
191 return((ret_val & 0xFF00) >> 8); 192 return((ret_val & 0xFF00) >> 8);
192} 193}
193 194
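access_EV takes int15_lock with spin_lock_irqsave around the whole INT 15h call because the BIOS entry point is not reentrant; even interrupt-context callers must be held off. The shape of that pattern without the inline assembly; my_firmware_call is a hypothetical stand-in for the trampoline:

    static DEFINE_SPINLOCK(fw_lock);

    /* Hypothetical stand-in for the non-reentrant firmware entry point. */
    static int my_firmware_call(u16 op);

    static int guarded_firmware_call(u16 op)
    {
            unsigned long flags;
            int ret;

            /* Disable local interrupts as well: nothing may reenter
             * the firmware while a call is in flight. */
            spin_lock_irqsave(&fw_lock, flags);
            ret = my_firmware_call(op);
            spin_unlock_irqrestore(&fw_lock, flags);

            return ret;
    }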
@@ -210,14 +211,16 @@ static int load_HRT (void __iomem *rom_start)
210 211
211 available = 1024; 212 available = 1024;
212 213
213 // Now load the EV 214 /* Now load the EV */
214 temp_dword = available; 215 temp_dword = available;
215 216
216 rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword); 217 rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
217 218
218 evbuffer_length = temp_dword; 219 evbuffer_length = temp_dword;
219 220
220 // We're maintaining the resource lists so write FF to invalidate old info 221 /* We're maintaining the resource lists so write FF to invalidate old
222 * info
223 */
221 temp_dword = 1; 224 temp_dword = 1;
222 225
223 rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword); 226 rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@ static u32 store_HRT (void __iomem *rom_start)
263 p_EV_header = (struct ev_hrt_header *) pFill; 266 p_EV_header = (struct ev_hrt_header *) pFill;
264 267
265 ctrl = cpqhp_ctrl_list; 268 ctrl = cpqhp_ctrl_list;
266 269
267 // The revision of this structure 270 /* The revision of this structure */
268 rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available); 271 rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
269 if (rc) 272 if (rc)
270 return(rc); 273 return(rc);
271 274
272 // The number of controllers 275 /* The number of controllers */
273 rc = add_byte( &pFill, 1, &usedbytes, &available); 276 rc = add_byte( &pFill, 1, &usedbytes, &available);
274 if (rc) 277 if (rc)
275 return(rc); 278 return(rc);
@@ -279,27 +282,27 @@ static u32 store_HRT (void __iomem *rom_start)
279 282
280 numCtrl++; 283 numCtrl++;
281 284
282 // The bus number 285 /* The bus number */
283 rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available); 286 rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
284 if (rc) 287 if (rc)
285 return(rc); 288 return(rc);
286 289
287 // The device Number 290 /* The device Number */
288 rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available); 291 rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
289 if (rc) 292 if (rc)
290 return(rc); 293 return(rc);
291 294
292 // The function Number 295 /* The function Number */
293 rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available); 296 rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
294 if (rc) 297 if (rc)
295 return(rc); 298 return(rc);
296 299
297 // Skip the number of available entries 300 /* Skip the number of available entries */
298 rc = add_dword( &pFill, 0, &usedbytes, &available); 301 rc = add_dword( &pFill, 0, &usedbytes, &available);
299 if (rc) 302 if (rc)
300 return(rc); 303 return(rc);
301 304
302 // Figure out memory Available 305 /* Figure out memory Available */
303 306
304 resNode = ctrl->mem_head; 307 resNode = ctrl->mem_head;
305 308
@@ -308,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
308 while (resNode) { 311 while (resNode) {
309 loop ++; 312 loop ++;
310 313
311 // base 314 /* base */
312 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 315 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
313 if (rc) 316 if (rc)
314 return(rc); 317 return(rc);
315 318
316 // length 319 /* length */
317 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 320 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
318 if (rc) 321 if (rc)
319 return(rc); 322 return(rc);
@@ -321,10 +324,10 @@ static u32 store_HRT (void __iomem *rom_start)
321 resNode = resNode->next; 324 resNode = resNode->next;
322 } 325 }
323 326
324 // Fill in the number of entries 327 /* Fill in the number of entries */
325 p_ev_ctrl->mem_avail = loop; 328 p_ev_ctrl->mem_avail = loop;
326 329
327 // Figure out prefetchable memory Available 330 /* Figure out prefetchable memory Available */
328 331
329 resNode = ctrl->p_mem_head; 332 resNode = ctrl->p_mem_head;
330 333
@@ -333,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
333 while (resNode) { 336 while (resNode) {
334 loop ++; 337 loop ++;
335 338
336 // base 339 /* base */
337 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 340 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
338 if (rc) 341 if (rc)
339 return(rc); 342 return(rc);
340 343
341 // length 344 /* length */
342 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 345 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
343 if (rc) 346 if (rc)
344 return(rc); 347 return(rc);
@@ -346,10 +349,10 @@ static u32 store_HRT (void __iomem *rom_start)
346 resNode = resNode->next; 349 resNode = resNode->next;
347 } 350 }
348 351
349 // Fill in the number of entries 352 /* Fill in the number of entries */
350 p_ev_ctrl->p_mem_avail = loop; 353 p_ev_ctrl->p_mem_avail = loop;
351 354
352 // Figure out IO Available 355 /* Figure out IO Available */
353 356
354 resNode = ctrl->io_head; 357 resNode = ctrl->io_head;
355 358
@@ -358,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
358 while (resNode) { 361 while (resNode) {
359 loop ++; 362 loop ++;
360 363
361 // base 364 /* base */
362 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 365 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
363 if (rc) 366 if (rc)
364 return(rc); 367 return(rc);
365 368
366 // length 369 /* length */
367 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 370 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
368 if (rc) 371 if (rc)
369 return(rc); 372 return(rc);
@@ -371,10 +374,10 @@ static u32 store_HRT (void __iomem *rom_start)
371 resNode = resNode->next; 374 resNode = resNode->next;
372 } 375 }
373 376
374 // Fill in the number of entries 377 /* Fill in the number of entries */
375 p_ev_ctrl->io_avail = loop; 378 p_ev_ctrl->io_avail = loop;
376 379
377 // Figure out bus Available 380 /* Figure out bus Available */
378 381
379 resNode = ctrl->bus_head; 382 resNode = ctrl->bus_head;
380 383
@@ -383,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
383 while (resNode) { 386 while (resNode) {
384 loop ++; 387 loop ++;
385 388
386 // base 389 /* base */
387 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 390 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
388 if (rc) 391 if (rc)
389 return(rc); 392 return(rc);
390 393
391 // length 394 /* length */
392 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 395 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
393 if (rc) 396 if (rc)
394 return(rc); 397 return(rc);
@@ -396,15 +399,15 @@ static u32 store_HRT (void __iomem *rom_start)
396 resNode = resNode->next; 399 resNode = resNode->next;
397 } 400 }
398 401
399 // Fill in the number of entries 402 /* Fill in the number of entries */
400 p_ev_ctrl->bus_avail = loop; 403 p_ev_ctrl->bus_avail = loop;
401 404
402 ctrl = ctrl->next; 405 ctrl = ctrl->next;
403 } 406 }
404 407
405 p_EV_header->num_of_ctrl = numCtrl; 408 p_EV_header->num_of_ctrl = numCtrl;
406 409
407 // Now store the EV 410 /* Now store the EV */
408 411
409 temp_dword = usedbytes; 412 temp_dword = usedbytes;
410 413
@@ -449,20 +452,21 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
449 struct ev_hrt_header *p_EV_header; 452 struct ev_hrt_header *p_EV_header;
450 453
451 if (!evbuffer_init) { 454 if (!evbuffer_init) {
452 // Read the resource list information in from NVRAM 455 /* Read the resource list information in from NVRAM */
453 if (load_HRT(rom_start)) 456 if (load_HRT(rom_start))
454 memset (evbuffer, 0, 1024); 457 memset (evbuffer, 0, 1024);
455 458
456 evbuffer_init = 1; 459 evbuffer_init = 1;
457 } 460 }
458 461
459 // If we saved information in NVRAM, use it now 462 /* If we saved information in NVRAM, use it now */
460 p_EV_header = (struct ev_hrt_header *) evbuffer; 463 p_EV_header = (struct ev_hrt_header *) evbuffer;
461 464
462 // The following code is for systems where version 1.0 of this 465 /* The following code is for systems where version 1.0 of this
463 // driver has been loaded, but doesn't support the hardware. 466 * driver has been loaded, but doesn't support the hardware.
464 // In that case, the driver would incorrectly store something 467 * In that case, the driver would incorrectly store something
465 // in NVRAM. 468 * in NVRAM.
469 */
466 if ((p_EV_header->Version == 2) || 470 if ((p_EV_header->Version == 2) ||
467 ((p_EV_header->Version == 1) && !ctrl->push_flag)) { 471 ((p_EV_header->Version == 1) && !ctrl->push_flag)) {
468 p_byte = &(p_EV_header->next); 472 p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
479 function = p_ev_ctrl->function; 483 function = p_ev_ctrl->function;
480 484
481 while ((bus != ctrl->bus) || 485 while ((bus != ctrl->bus) ||
482 (device != PCI_SLOT(ctrl->pci_dev->devfn)) || 486 (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
483 (function != PCI_FUNC(ctrl->pci_dev->devfn))) { 487 (function != PCI_FUNC(ctrl->pci_dev->devfn))) {
484 nummem = p_ev_ctrl->mem_avail; 488 nummem = p_ev_ctrl->mem_avail;
485 numpmem = p_ev_ctrl->p_mem_avail; 489 numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
491 if (p_byte > ((u8*)p_EV_header + evbuffer_length)) 495 if (p_byte > ((u8*)p_EV_header + evbuffer_length))
492 return 2; 496 return 2;
493 497
494 // Skip forward to the next entry 498 /* Skip forward to the next entry */
495 p_byte += (nummem + numpmem + numio + numbus) * 8; 499 p_byte += (nummem + numpmem + numio + numbus) * 8;
496 500
497 if (p_byte > ((u8*)p_EV_header + evbuffer_length)) 501 if (p_byte > ((u8*)p_EV_header + evbuffer_length))
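The skip above works because of the record layout store_HRT writes: per controller, four counts followed by that many (base, length) dword pairs, eight bytes per pair, with a bounds check against evbuffer_length after every hop. The hop as a helper, field names as in these hunks:

    /* Skip one controller's resource entries; NULL if it would run
     * past the end of the buffer. */
    static uint8_t *skip_ctrl_entry(uint8_t *p, uint8_t *end,
                                    uint32_t nummem, uint32_t numpmem,
                                    uint32_t numio, uint32_t numbus)
    {
            /* Each resource entry is two dwords: base, then length. */
            p += (nummem + numpmem + numio + numbus) * 8;
            return p > end ? NULL : p;
    }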
@@ -629,8 +633,9 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
629 ctrl->bus_head = bus_node; 633 ctrl->bus_head = bus_node;
630 } 634 }
631 635
632 // If all of the following fail, we don't have any resources for 636 /* If all of the following fail, we don't have any resources for
633 // hot plug add 637 * hot plug add
638 */
634 rc = 1; 639 rc = 1;
635 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); 640 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
636 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); 641 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
640 if (rc) 645 if (rc)
641 return(rc); 646 return(rc);
642 } else { 647 } else {
643 if ((evbuffer[0] != 0) && (!ctrl->push_flag)) 648 if ((evbuffer[0] != 0) && (!ctrl->push_flag))
644 return 1; 649 return 1;
645 } 650 }
646 651
647 return 0; 652 return 0;
648} 653}
649 654
650 655
651int compaq_nvram_store (void __iomem *rom_start) 656int compaq_nvram_store (void __iomem *rom_start)
652{ 657{
653 int rc = 1; 658 int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0fcb8ee..6173b9a4544e 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
37#include "../pci.h" 37#include "../pci.h"
38#include "cpqphp.h" 38#include "cpqphp.h"
39#include "cpqphp_nvram.h" 39#include "cpqphp_nvram.h"
40#include <asm/pci_x86.h>
41 40
42 41
43u8 cpqhp_nic_irq; 42u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
82} 81}
83 82
84 83
85int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) 84int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
86{ 85{
87 unsigned char bus; 86 unsigned char bus;
88 struct pci_bus *child; 87 struct pci_bus *child;
89 int num; 88 int num;
90 89
91 if (func->pci_dev == NULL) 90 if (func->pci_dev == NULL)
 92 func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function)); 91 func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
93 92
94 /* No pci device, we need to create it then */ 93 /* No pci device, we need to create it then */
95 if (func->pci_dev == NULL) { 94 if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
99 if (num) 98 if (num)
100 pci_bus_add_devices(ctrl->pci_dev->bus); 99 pci_bus_add_devices(ctrl->pci_dev->bus);
101 100
102 func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function)); 101 func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
103 if (func->pci_dev == NULL) { 102 if (func->pci_dev == NULL) {
104 dbg("ERROR: pci_dev still null\n"); 103 dbg("ERROR: pci_dev still null\n");
105 return 0; 104 return 0;
@@ -112,20 +111,24 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
112 pci_do_scan_bus(child); 111 pci_do_scan_bus(child);
113 } 112 }
114 113
114 pci_dev_put(func->pci_dev);
115
115 return 0; 116 return 0;
116} 117}
117 118
118 119
119int cpqhp_unconfigure_device(struct pci_func* func) 120int cpqhp_unconfigure_device(struct pci_func* func)
120{ 121{
121 int j; 122 int j;
122 123
123 dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); 124 dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
124 125
125 for (j=0; j<8 ; j++) { 126 for (j=0; j<8 ; j++) {
126 struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j)); 127 struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
127 if (temp) 128 if (temp) {
129 pci_dev_put(temp);
128 pci_remove_bus_device(temp); 130 pci_remove_bus_device(temp);
131 }
129 } 132 }
130 return 0; 133 return 0;
131} 134}
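The move from pci_find_slot() to pci_get_bus_and_slot() changes ownership: the get variant returns a referenced struct pci_dev that the caller must drop with pci_dev_put(). Note the hunk above puts the reference before calling pci_remove_bus_device(), which only stays safe because the bus still pins the device; the conventional ordering is get, use, then put:

    static void remove_one_fn(unsigned int bus, unsigned int device,
                              unsigned int fn)
    {
            struct pci_dev *dev;

            dev = pci_get_bus_and_slot(bus, PCI_DEVFN(device, fn));
            if (dev) {
                    pci_remove_bus_device(dev); /* use while the ref is held */
                    pci_dev_put(dev);           /* then release it */
            }
    }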
@@ -178,32 +181,22 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
178 if (!rc) 181 if (!rc)
179 return !rc; 182 return !rc;
180 183
181 // set the Edge Level Control Register (ELCR) 184 /* set the Edge Level Control Register (ELCR) */
182 temp_word = inb(0x4d0); 185 temp_word = inb(0x4d0);
183 temp_word |= inb(0x4d1) << 8; 186 temp_word |= inb(0x4d1) << 8;
184 187
185 temp_word |= 0x01 << irq_num; 188 temp_word |= 0x01 << irq_num;
186 189
187 // This should only be for x86 as it sets the Edge Level Control Register 190 /* This should only be for x86 as it sets the Edge Level
188 outb((u8) (temp_word & 0xFF), 0x4d0); 191 * Control Register
189 outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1); 192 */
 190 rc = 0; 193 outb((u8) (temp_word & 0xFF), 0x4d0);
 191 } 194 outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1); rc = 0; }
192 195
193 return rc; 196 return rc;
194} 197}
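Ports 0x4d0 and 0x4d1 together form the 8259 Edge/Level Control Register, one bit per IRQ 0-15; setting a bit makes that line level-triggered, which PCI interrupts require. The read-modify-write from cpqhp_set_irq, isolated as a sketch:

    /* Mark irq_num (0-15) level-triggered in the ELCR. */
    static void elcr_make_level(unsigned int irq_num)
    {
            u16 elcr = inb(0x4d0) | (inb(0x4d1) << 8);

            elcr |= 1 << irq_num;
            outb(elcr & 0xFF, 0x4d0);
            outb((elcr >> 8) & 0xFF, 0x4d1);
    }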
195 198
196 199
197/*
198 * WTF??? This function isn't in the code, yet a function calls it, but the
199 * compiler optimizes it away? strange. Here as a placeholder to keep the
200 * compiler happy.
201 */
202static int PCI_ScanBusNonBridge (u8 bus, u8 device)
203{
204 return 0;
205}
206
207static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num) 200static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
208{ 201{
209 u16 tdevice; 202 u16 tdevice;
@@ -213,11 +206,11 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
213 ctrl->pci_bus->number = bus_num; 206 ctrl->pci_bus->number = bus_num;
214 207
215 for (tdevice = 0; tdevice < 0xFF; tdevice++) { 208 for (tdevice = 0; tdevice < 0xFF; tdevice++) {
216 //Scan for access first 209 /* Scan for access first */
217 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) 210 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
218 continue; 211 continue;
219 dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); 212 dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
220 //Yep we got one. Not a bridge ? 213 /* Yep we got one. Not a bridge ? */
221 if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) { 214 if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
222 *dev_num = tdevice; 215 *dev_num = tdevice;
223 dbg("found it !\n"); 216 dbg("found it !\n");
@@ -225,16 +218,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
225 } 218 }
226 } 219 }
227 for (tdevice = 0; tdevice < 0xFF; tdevice++) { 220 for (tdevice = 0; tdevice < 0xFF; tdevice++) {
228 //Scan for access first 221 /* Scan for access first */
229 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) 222 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
230 continue; 223 continue;
231 dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); 224 dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
232 //Yep we got one. bridge ? 225 /* Yep we got one. bridge ? */
233 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { 226 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
234 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); 227 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
228 /* XXX: no recursion, wtf? */
235 dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); 229 dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
236 if (PCI_ScanBusNonBridge(tbus, tdevice) == 0) 230 return 0;
237 return 0;
238 } 231 }
239 } 232 }
240 233
@@ -244,39 +237,23 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
244 237
245static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge) 238static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
246{ 239{
247 struct irq_routing_table *PCIIRQRoutingInfoLength; 240 int loop, len;
248 long len;
249 long loop;
250 u32 work; 241 u32 work;
251
252 u8 tbus, tdevice, tslot; 242 u8 tbus, tdevice, tslot;
253 243
254 PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table(); 244 len = cpqhp_routing_table_length();
255 if (!PCIIRQRoutingInfoLength)
256 return -1;
257
258 len = (PCIIRQRoutingInfoLength->size -
259 sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
260 // Make sure I got at least one entry
261 if (len == 0) {
262 kfree(PCIIRQRoutingInfoLength );
263 return -1;
264 }
265
266 for (loop = 0; loop < len; ++loop) { 245 for (loop = 0; loop < len; ++loop) {
267 tbus = PCIIRQRoutingInfoLength->slots[loop].bus; 246 tbus = cpqhp_routing_table->slots[loop].bus;
268 tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn; 247 tdevice = cpqhp_routing_table->slots[loop].devfn;
269 tslot = PCIIRQRoutingInfoLength->slots[loop].slot; 248 tslot = cpqhp_routing_table->slots[loop].slot;
270 249
271 if (tslot == slot) { 250 if (tslot == slot) {
272 *bus_num = tbus; 251 *bus_num = tbus;
273 *dev_num = tdevice; 252 *dev_num = tdevice;
274 ctrl->pci_bus->number = tbus; 253 ctrl->pci_bus->number = tbus;
275 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); 254 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
276 if (!nobridge || (work == 0xffffffff)) { 255 if (!nobridge || (work == 0xffffffff))
277 kfree(PCIIRQRoutingInfoLength );
278 return 0; 256 return 0;
279 }
280 257
281 dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); 258 dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
282 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work); 259 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
287 dbg("Scan bus for Non Bridge: bus %d\n", tbus); 264 dbg("Scan bus for Non Bridge: bus %d\n", tbus);
288 if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) { 265 if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
289 *bus_num = tbus; 266 *bus_num = tbus;
290 kfree(PCIIRQRoutingInfoLength );
291 return 0; 267 return 0;
292 } 268 }
293 } else { 269 } else
294 kfree(PCIIRQRoutingInfoLength );
295 return 0; 270 return 0;
296 }
297
298 } 271 }
299 } 272 }
300 kfree(PCIIRQRoutingInfoLength );
301 return -1; 273 return -1;
302} 274}
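The removed code fetched a fresh irq_routing_table on every lookup and had to kfree it on each exit path; the replacement assumes the driver now caches one table in cpqhp_routing_table and asks cpqhp_routing_table_length() for the entry count. The deleted arithmetic is the natural body for that helper; a sketch under that assumption:

    static struct irq_routing_table *cpqhp_routing_table;
    /* assumed set once at init from pcibios_get_irq_routing_table() */

    static int cpqhp_routing_table_length(void)
    {
            if (!cpqhp_routing_table)
                    return 0;
            /* A fixed header followed by a variable number of slots. */
            return (cpqhp_routing_table->size -
                    sizeof(struct irq_routing_table)) /
                    sizeof(struct irq_info);
    }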
303 275
304 276
305int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot) 277int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
306{ 278{
307 return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed) 279 /* plain (bridges allowed) */
280 return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
308} 281}
309 282
310 283
311/* More PCI configuration routines; this time centered around hotplug controller */ 284/* More PCI configuration routines; this time centered around hotplug
285 * controller
286 */
312 287
313 288
314/* 289/*
@@ -339,12 +314,12 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
339 int stop_it; 314 int stop_it;
340 int index; 315 int index;
341 316
342 // Decide which slots are supported 317 /* Decide which slots are supported */
343 318
344 if (is_hot_plug) { 319 if (is_hot_plug) {
345 //********************************* 320 /*
346 // is_hot_plug is the slot mask 321 * is_hot_plug is the slot mask
347 //********************************* 322 */
348 FirstSupported = is_hot_plug >> 4; 323 FirstSupported = is_hot_plug >> 4;
349 LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1; 324 LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
350 } else { 325 } else {
@@ -352,123 +327,127 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
352 LastSupported = 0x1F; 327 LastSupported = 0x1F;
353 } 328 }
354 329
355 // Save PCI configuration space for all devices in supported slots 330 /* Save PCI configuration space for all devices in supported slots */
356 ctrl->pci_bus->number = busnumber; 331 ctrl->pci_bus->number = busnumber;
357 for (device = FirstSupported; device <= LastSupported; device++) { 332 for (device = FirstSupported; device <= LastSupported; device++) {
358 ID = 0xFFFFFFFF; 333 ID = 0xFFFFFFFF;
359 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID); 334 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
335
336 if (ID == 0xFFFFFFFF) {
337 if (is_hot_plug) {
338 /* Setup slot structure with entry for empty
339 * slot
340 */
341 new_slot = cpqhp_slot_create(busnumber);
342 if (new_slot == NULL)
343 return 1;
360 344
361 if (ID != 0xFFFFFFFF) { // device in slot 345 new_slot->bus = (u8) busnumber;
362 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code); 346 new_slot->device = (u8) device;
363 if (rc) 347 new_slot->function = 0;
364 return rc; 348 new_slot->is_a_board = 0;
349 new_slot->presence_save = 0;
350 new_slot->switch_save = 0;
351 }
352 continue;
353 }
365 354
366 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type); 355 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
367 if (rc) 356 if (rc)
368 return rc; 357 return rc;
369 358
370 // If multi-function device, set max_functions to 8 359 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
371 if (header_type & 0x80) 360 if (rc)
372 max_functions = 8; 361 return rc;
373 else
374 max_functions = 1;
375 362
376 function = 0; 363 /* If multi-function device, set max_functions to 8 */
364 if (header_type & 0x80)
365 max_functions = 8;
366 else
367 max_functions = 1;
377 368
378 do { 369 function = 0;
379 DevError = 0;
380 370
381 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge 371 do {
382 // Recurse the subordinate bus 372 DevError = 0;
383 // get the subordinate bus number 373 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
384 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus); 374 /* Recurse the subordinate bus
385 if (rc) { 375 * get the subordinate bus number
376 */
377 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
378 if (rc) {
379 return rc;
380 } else {
381 sub_bus = (int) secondary_bus;
382
383 /* Save secondary bus cfg spc
384 * with this recursive call.
385 */
386 rc = cpqhp_save_config(ctrl, sub_bus, 0);
387 if (rc)
386 return rc; 388 return rc;
387 } else { 389 ctrl->pci_bus->number = busnumber;
388 sub_bus = (int) secondary_bus;
389
390 // Save secondary bus cfg spc
391 // with this recursive call.
392 rc = cpqhp_save_config(ctrl, sub_bus, 0);
393 if (rc)
394 return rc;
395 ctrl->pci_bus->number = busnumber;
396 }
397 } 390 }
391 }
398 392
399 index = 0; 393 index = 0;
394 new_slot = cpqhp_slot_find(busnumber, device, index++);
395 while (new_slot &&
396 (new_slot->function != (u8) function))
400 new_slot = cpqhp_slot_find(busnumber, device, index++); 397 new_slot = cpqhp_slot_find(busnumber, device, index++);
401 while (new_slot &&
402 (new_slot->function != (u8) function))
403 new_slot = cpqhp_slot_find(busnumber, device, index++);
404 398
405 if (!new_slot) { 399 if (!new_slot) {
406 // Setup slot structure. 400 /* Setup slot structure. */
407 new_slot = cpqhp_slot_create(busnumber); 401 new_slot = cpqhp_slot_create(busnumber);
408 402 if (new_slot == NULL)
409 if (new_slot == NULL) 403 return 1;
410 return(1); 404 }
411 }
412
413 new_slot->bus = (u8) busnumber;
414 new_slot->device = (u8) device;
415 new_slot->function = (u8) function;
416 new_slot->is_a_board = 1;
417 new_slot->switch_save = 0x10;
418 // In case of unsupported board
419 new_slot->status = DevError;
420 new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
421
422 for (cloop = 0; cloop < 0x20; cloop++) {
423 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
424 if (rc)
425 return rc;
426 }
427 405
428 function++; 406 new_slot->bus = (u8) busnumber;
407 new_slot->device = (u8) device;
408 new_slot->function = (u8) function;
409 new_slot->is_a_board = 1;
410 new_slot->switch_save = 0x10;
411 /* In case of unsupported board */
412 new_slot->status = DevError;
413 new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
429 414
430 stop_it = 0; 415 for (cloop = 0; cloop < 0x20; cloop++) {
416 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
417 if (rc)
418 return rc;
419 }
431 420
432 // this loop skips to the next present function 421 pci_dev_put(new_slot->pci_dev);
433 // reading in Class Code and Header type.
434 422
435 while ((function < max_functions)&&(!stop_it)) { 423 function++;
436 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
437 if (ID == 0xFFFFFFFF) { // nothing there.
438 function++;
439 } else { // Something there
440 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
441 if (rc)
442 return rc;
443 424
444 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type); 425 stop_it = 0;
445 if (rc)
446 return rc;
447 426
448 stop_it++; 427 /* this loop skips to the next present function
449 } 428 * reading in Class Code and Header type.
429 */
430 while ((function < max_functions) && (!stop_it)) {
431 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
432 if (ID == 0xFFFFFFFF) {
433 function++;
434 continue;
450 } 435 }
436 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
437 if (rc)
438 return rc;
451 439
452 } while (function < max_functions); 440 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
453 } // End of IF (device in slot?) 441 if (rc)
454 else if (is_hot_plug) { 442 return rc;
455 // Setup slot structure with entry for empty slot
456 new_slot = cpqhp_slot_create(busnumber);
457 443
458 if (new_slot == NULL) { 444 stop_it++;
459 return(1);
460 } 445 }
461 446
462 new_slot->bus = (u8) busnumber; 447 } while (function < max_functions);
463 new_slot->device = (u8) device; 448 } /* End of FOR loop */
464 new_slot->function = 0;
465 new_slot->is_a_board = 0;
466 new_slot->presence_save = 0;
467 new_slot->switch_save = 0;
468 }
469 } // End of FOR loop
470 449
471 return(0); 450 return 0;
472} 451}
473 452
474 453
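The cpqhp_save_config() rework above does two things besides the comment-style cleanup: it inverts the empty-slot test into an early continue so the main body loses a level of nesting, and it replaces the deprecated pci_find_slot() with pci_get_bus_and_slot(), which returns a device with a reference held. A minimal sketch of that get/put contract, assuming nothing beyond what the hunk shows:

	struct pci_dev *dev;

	/* pci_get_bus_and_slot() takes a reference on the device it
	 * returns; the caller must balance it with pci_dev_put().
	 */
	dev = pci_get_bus_and_slot(bus, PCI_DEVFN(device, function));
	if (dev) {
		/* ... use dev while the reference is held ... */
		pci_dev_put(dev);
	}

Note that the hunk drops the reference immediately after caching the pointer in new_slot->pci_dev, so the cached pointer does not by itself keep the device alive.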
@@ -489,7 +468,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
489 u8 secondary_bus; 468 u8 secondary_bus;
490 int sub_bus; 469 int sub_bus;
491 int max_functions; 470 int max_functions;
492 int function; 471 int function = 0;
493 int cloop = 0; 472 int cloop = 0;
494 int stop_it; 473 int stop_it;
495 474
@@ -498,63 +477,58 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
498 ctrl->pci_bus->number = new_slot->bus; 477 ctrl->pci_bus->number = new_slot->bus;
499 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID); 478 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
500 479
501 if (ID != 0xFFFFFFFF) { // device in slot 480 if (ID == 0xFFFFFFFF)
502 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code); 481 return 2;
503 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
504
505 if (header_type & 0x80) // Multi-function device
506 max_functions = 8;
507 else
508 max_functions = 1;
509
510 function = 0;
511
512 do {
513 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
514 // Recurse the subordinate bus
515 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
516 482
517 sub_bus = (int) secondary_bus; 483 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
484 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
518 485
519 // Save the config headers for the secondary bus. 486 if (header_type & 0x80) /* Multi-function device */
520 rc = cpqhp_save_config(ctrl, sub_bus, 0); 487 max_functions = 8;
521 if (rc) 488 else
522 return(rc); 489 max_functions = 1;
523 ctrl->pci_bus->number = new_slot->bus;
524 490
525 } // End of IF 491 while (function < max_functions) {
492 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
493 /* Recurse the subordinate bus */
494 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
526 495
527 new_slot->status = 0; 496 sub_bus = (int) secondary_bus;
528 497
529 for (cloop = 0; cloop < 0x20; cloop++) { 498 /* Save the config headers for the secondary
530 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop])); 499 * bus.
531 } 500 */
501 rc = cpqhp_save_config(ctrl, sub_bus, 0);
502 if (rc)
503 return(rc);
504 ctrl->pci_bus->number = new_slot->bus;
532 505
533 function++; 506 }
534 507
535 stop_it = 0; 508 new_slot->status = 0;
536 509
537 // this loop skips to the next present function 510 for (cloop = 0; cloop < 0x20; cloop++)
538 // reading in the Class Code and the Header type. 511 pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
539 512
540 while ((function < max_functions) && (!stop_it)) { 513 function++;
541 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
542 514
543 if (ID == 0xFFFFFFFF) { // nothing there. 515 stop_it = 0;
544 function++;
545 } else { // Something there
546 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
547 516
548 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type); 517 /* this loop skips to the next present function
518 * reading in the Class Code and the Header type.
519 */
520 while ((function < max_functions) && (!stop_it)) {
521 pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
549 522
550 stop_it++; 523 if (ID == 0xFFFFFFFF)
551 } 524 function++;
525 else {
526 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
527 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
528 stop_it++;
552 } 529 }
530 }
553 531
554 } while (function < max_functions);
555 } // End of IF (device in slot?)
556 else {
557 return 2;
558 } 532 }
559 533
560 return 0; 534 return 0;
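cpqhp_save_slot_config() gets the same shape change above: the empty slot is rejected with an early return 2, and the do/while over functions becomes a plain while, with function now initialized to 0 at its declaration. The function count still comes from bit 7 of the header-type byte; as a standalone sketch:

	#include <stdint.h>

	/* Bit 7 of the PCI header-type byte marks a multi-function
	 * device, so the scan covers 8 functions instead of 1.
	 */
	static int max_functions(uint8_t header_type)
	{
		return (header_type & 0x80) ? 8 : 1;
	}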
@@ -590,11 +564,10 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
590 pci_bus->number = func->bus; 564 pci_bus->number = func->bus;
591 devfn = PCI_DEVFN(func->device, func->function); 565 devfn = PCI_DEVFN(func->device, func->function);
592 566
593 // Check for Bridge 567 /* Check for Bridge */
594 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 568 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
595 569
596 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { 570 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
597 // PCI-PCI Bridge
598 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 571 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
599 572
600 sub_bus = (int) secondary_bus; 573 sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
610 } 583 }
611 pci_bus->number = func->bus; 584 pci_bus->number = func->bus;
612 585
613 //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together 586 /* FIXME: this loop is duplicated in the non-bridge
614 // Figure out IO and memory base lengths 587 * case. The two could be rolled together Figure out
588 * IO and memory base lengths
589 */
615 for (cloop = 0x10; cloop <= 0x14; cloop += 4) { 590 for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
616 temp_register = 0xFFFFFFFF; 591 temp_register = 0xFFFFFFFF;
617 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 592 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
618 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 593 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
619 594 /* If this register is implemented */
620 if (base) { // If this register is implemented 595 if (base) {
621 if (base & 0x01L) { 596 if (base & 0x01L) {
622 // IO base 597 /* IO base
623 // set base = amount of IO space requested 598 * set base = amount of IO space
599 * requested
600 */
624 base = base & 0xFFFFFFFE; 601 base = base & 0xFFFFFFFE;
625 base = (~base) + 1; 602 base = (~base) + 1;
626 603
627 type = 1; 604 type = 1;
628 } else { 605 } else {
629 // memory base 606 /* memory base */
630 base = base & 0xFFFFFFF0; 607 base = base & 0xFFFFFFF0;
631 base = (~base) + 1; 608 base = (~base) + 1;
632 609
@@ -637,32 +614,36 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
637 type = 0; 614 type = 0;
638 } 615 }
639 616
640 // Save information in slot structure 617 /* Save information in slot structure */
641 func->base_length[(cloop - 0x10) >> 2] = 618 func->base_length[(cloop - 0x10) >> 2] =
642 base; 619 base;
643 func->base_type[(cloop - 0x10) >> 2] = type; 620 func->base_type[(cloop - 0x10) >> 2] = type;
644 621
645 } // End of base register loop 622 } /* End of base register loop */
646 623
647 624 } else if ((header_type & 0x7F) == 0x00) {
648 } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge 625 /* Figure out IO and memory base lengths */
649 // Figure out IO and memory base lengths
650 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 626 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
651 temp_register = 0xFFFFFFFF; 627 temp_register = 0xFFFFFFFF;
652 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 628 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
653 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 629 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
654 630
655 if (base) { // If this register is implemented 631 /* If this register is implemented */
632 if (base) {
656 if (base & 0x01L) { 633 if (base & 0x01L) {
657 // IO base 634 /* IO base
658 // base = amount of IO space requested 635 * base = amount of IO space
636 * requested
637 */
659 base = base & 0xFFFFFFFE; 638 base = base & 0xFFFFFFFE;
660 base = (~base) + 1; 639 base = (~base) + 1;
661 640
662 type = 1; 641 type = 1;
663 } else { 642 } else {
664 // memory base 643 /* memory base
665 // base = amount of memory space requested 644 * base = amount of memory
645 * space requested
646 */
666 base = base & 0xFFFFFFF0; 647 base = base & 0xFFFFFFF0;
667 base = (~base) + 1; 648 base = (~base) + 1;
668 649
@@ -673,16 +654,16 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
673 type = 0; 654 type = 0;
674 } 655 }
675 656
676 // Save information in slot structure 657 /* Save information in slot structure */
677 func->base_length[(cloop - 0x10) >> 2] = base; 658 func->base_length[(cloop - 0x10) >> 2] = base;
678 func->base_type[(cloop - 0x10) >> 2] = type; 659 func->base_type[(cloop - 0x10) >> 2] = type;
679 660
680 } // End of base register loop 661 } /* End of base register loop */
681 662
682 } else { // Some other unknown header type 663 } else { /* Some other unknown header type */
683 } 664 }
684 665
685 // find the next device in this slot 666 /* find the next device in this slot */
686 func = cpqhp_slot_find(func->bus, func->device, index++); 667 func = cpqhp_slot_find(func->bus, func->device, index++);
687 } 668 }
688 669
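Both branches of cpqhp_save_base_addr_length() size BARs the classic way: write all-ones, read the register back, mask off the flag bits, and two's-complement the remainder. A self-contained sketch of the decode step these hunks perform (the probe write/read itself is plain config-space access and is omitted):

	#include <stdint.h>

	/* Decode a BAR value read back after writing 0xFFFFFFFF:
	 * returns the requested window size, or 0 if the BAR is not
	 * implemented; *is_io distinguishes I/O from memory BARs.
	 */
	static uint32_t bar_size(uint32_t probed, int *is_io)
	{
		if (!probed)
			return 0;		/* not implemented */
		if (probed & 0x1) {		/* bit 0: I/O space */
			*is_io = 1;
			return ~(probed & 0xFFFFFFFE) + 1;
		}
		*is_io = 0;			/* memory space */
		return ~(probed & 0xFFFFFFF0) + 1;
	}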
@@ -728,18 +709,18 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
728 pci_bus->number = func->bus; 709 pci_bus->number = func->bus;
729 devfn = PCI_DEVFN(func->device, func->function); 710 devfn = PCI_DEVFN(func->device, func->function);
730 711
731 // Save the command register 712 /* Save the command register */
732 pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command); 713 pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
733 714
734 // disable card 715 /* disable card */
735 command = 0x00; 716 command = 0x00;
736 pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command); 717 pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
737 718
738 // Check for Bridge 719 /* Check for Bridge */
739 pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 720 pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
740 721
741 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 722 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
742 // Clear Bridge Control Register 723 /* Clear Bridge Control Register */
743 command = 0x00; 724 command = 0x00;
744 pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command); 725 pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
745 pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 726 pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
755 bus_node->next = func->bus_head; 736 bus_node->next = func->bus_head;
756 func->bus_head = bus_node; 737 func->bus_head = bus_node;
757 738
758 // Save IO base and Limit registers 739 /* Save IO base and Limit registers */
759 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base); 740 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
760 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length); 741 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
761 742
@@ -771,7 +752,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
771 func->io_head = io_node; 752 func->io_head = io_node;
772 } 753 }
773 754
774 // Save memory base and Limit registers 755 /* Save memory base and Limit registers */
775 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base); 756 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
776 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length); 757 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
777 758
@@ -787,7 +768,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
787 func->mem_head = mem_node; 768 func->mem_head = mem_node;
788 } 769 }
789 770
790 // Save prefetchable memory base and Limit registers 771 /* Save prefetchable memory base and Limit registers */
791 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base); 772 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
792 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length); 773 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
793 774
@@ -802,7 +783,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
802 p_mem_node->next = func->p_mem_head; 783 p_mem_node->next = func->p_mem_head;
803 func->p_mem_head = p_mem_node; 784 func->p_mem_head = p_mem_node;
804 } 785 }
805 // Figure out IO and memory base lengths 786 /* Figure out IO and memory base lengths */
806 for (cloop = 0x10; cloop <= 0x14; cloop += 4) { 787 for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
807 pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base); 788 pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
808 789
@@ -812,11 +793,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
812 793
813 temp_register = base; 794 temp_register = base;
814 795
815 if (base) { // If this register is implemented 796 /* If this register is implemented */
797 if (base) {
816 if (((base & 0x03L) == 0x01) 798 if (((base & 0x03L) == 0x01)
817 && (save_command & 0x01)) { 799 && (save_command & 0x01)) {
818 // IO base 800 /* IO base
819 // set temp_register = amount of IO space requested 801 * set temp_register = amount
802 * of IO space requested
803 */
820 temp_register = base & 0xFFFFFFFE; 804 temp_register = base & 0xFFFFFFFE;
821 temp_register = (~temp_register) + 1; 805 temp_register = (~temp_register) + 1;
822 806
@@ -834,7 +818,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
834 } else 818 } else
835 if (((base & 0x0BL) == 0x08) 819 if (((base & 0x0BL) == 0x08)
836 && (save_command & 0x02)) { 820 && (save_command & 0x02)) {
837 // prefetchable memory base 821 /* prefetchable memory base */
838 temp_register = base & 0xFFFFFFF0; 822 temp_register = base & 0xFFFFFFF0;
839 temp_register = (~temp_register) + 1; 823 temp_register = (~temp_register) + 1;
840 824
@@ -851,7 +835,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
851 } else 835 } else
852 if (((base & 0x0BL) == 0x00) 836 if (((base & 0x0BL) == 0x00)
853 && (save_command & 0x02)) { 837 && (save_command & 0x02)) {
854 // prefetchable memory base 838 /* prefetchable memory base */
855 temp_register = base & 0xFFFFFFF0; 839 temp_register = base & 0xFFFFFFF0;
856 temp_register = (~temp_register) + 1; 840 temp_register = (~temp_register) + 1;
857 841
@@ -868,9 +852,10 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
868 } else 852 } else
869 return(1); 853 return(1);
870 } 854 }
871 } // End of base register loop 855 } /* End of base register loop */
872 } else if ((header_type & 0x7F) == 0x00) { // Standard header 856 /* Standard header */
873 // Figure out IO and memory base lengths 857 } else if ((header_type & 0x7F) == 0x00) {
858 /* Figure out IO and memory base lengths */
874 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 859 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
875 pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base); 860 pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
876 861
@@ -880,11 +865,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
880 865
881 temp_register = base; 866 temp_register = base;
882 867
883 if (base) { // If this register is implemented 868 /* If this register is implemented */
869 if (base) {
884 if (((base & 0x03L) == 0x01) 870 if (((base & 0x03L) == 0x01)
885 && (save_command & 0x01)) { 871 && (save_command & 0x01)) {
886 // IO base 872 /* IO base
887 // set temp_register = amount of IO space requested 873 * set temp_register = amount
874 * of IO space requested
875 */
888 temp_register = base & 0xFFFFFFFE; 876 temp_register = base & 0xFFFFFFFE;
889 temp_register = (~temp_register) + 1; 877 temp_register = (~temp_register) + 1;
890 878
@@ -901,7 +889,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
901 } else 889 } else
902 if (((base & 0x0BL) == 0x08) 890 if (((base & 0x0BL) == 0x08)
903 && (save_command & 0x02)) { 891 && (save_command & 0x02)) {
904 // prefetchable memory base 892 /* prefetchable memory base */
905 temp_register = base & 0xFFFFFFF0; 893 temp_register = base & 0xFFFFFFF0;
906 temp_register = (~temp_register) + 1; 894 temp_register = (~temp_register) + 1;
907 895
@@ -918,7 +906,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
918 } else 906 } else
919 if (((base & 0x0BL) == 0x00) 907 if (((base & 0x0BL) == 0x00)
920 && (save_command & 0x02)) { 908 && (save_command & 0x02)) {
921 // prefetchable memory base 909 /* prefetchable memory base */
922 temp_register = base & 0xFFFFFFF0; 910 temp_register = base & 0xFFFFFFF0;
923 temp_register = (~temp_register) + 1; 911 temp_register = (~temp_register) + 1;
924 912
@@ -935,15 +923,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
935 } else 923 } else
936 return(1); 924 return(1);
937 } 925 }
938 } // End of base register loop 926 } /* End of base register loop */
939 } else { // Some other unknown header type
940 } 927 }
941 928
942 // find the next device in this slot 929 /* find the next device in this slot */
943 func = cpqhp_slot_find(func->bus, func->device, index++); 930 func = cpqhp_slot_find(func->bus, func->device, index++);
944 } 931 }
945 932
946 return(0); 933 return 0;
947} 934}
948 935
949 936
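cpqhp_save_used_resources() above first clears the command register so the card stops decoding, then credits a BAR only if the matching enable bit was set in the saved command word: bit 0 (I/O decode) for I/O BARs, bit 1 (memory decode) for memory BARs, with (base & 0x0B) further splitting prefetchable (0x08) from plain (0x00) memory. A compressed sketch of that gate, with the function name invented for illustration:

	#include <stdint.h>

	/* Was this BAR actually being decoded before we disabled the
	 * card?  saved_cmd is the PCI_COMMAND value read beforehand.
	 */
	static int bar_was_decoded(uint32_t base, uint16_t saved_cmd)
	{
		if ((base & 0x03) == 0x01)	/* I/O BAR */
			return saved_cmd & 0x01;
		return saved_cmd & 0x02;	/* any memory BAR */
	}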
@@ -975,16 +962,16 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
975 pci_bus->number = func->bus; 962 pci_bus->number = func->bus;
976 devfn = PCI_DEVFN(func->device, func->function); 963 devfn = PCI_DEVFN(func->device, func->function);
977 964
978 // Start at the top of config space so that the control 965 /* Start at the top of config space so that the control
979 // registers are programmed last 966 * registers are programmed last
980 for (cloop = 0x3C; cloop > 0; cloop -= 4) { 967 */
968 for (cloop = 0x3C; cloop > 0; cloop -= 4)
981 pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]); 969 pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
982 }
983 970
984 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 971 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
985 972
986 // If this is a bridge device, restore subordinate devices 973 /* If this is a bridge device, restore subordinate devices */
987 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 974 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
988 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 975 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
989 976
990 sub_bus = (int) secondary_bus; 977 sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
1000 } 987 }
1001 } else { 988 } else {
1002 989
1003 // Check all the base Address Registers to make sure 990 /* Check all the base Address Registers to make sure
1004 // they are the same. If not, the board is different. 991 * they are the same. If not, the board is different.
992 */
1005 993
1006 for (cloop = 16; cloop < 40; cloop += 4) { 994 for (cloop = 16; cloop < 40; cloop += 4) {
1007 pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp); 995 pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
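cpqhp_configure_board() restores the saved 64-byte header from offset 0x3C downward, so the command/status dword at 0x04 is written last and the card only begins decoding once its BARs are back in place. For a bridge it re-reads the secondary bus number and (in code beyond this excerpt) restores the subordinate devices; for an ordinary function it re-reads each BAR and compares it against the saved image. The saved image is indexed in dwords, hence the cloop >> 2; as a sketch of that mapping:

	#include <stdint.h>

	/* The saved header is an array of dwords, so byte offset off
	 * maps to array index off >> 2 (the indexing used above).
	 */
	static uint32_t saved_dword(const uint32_t *config_space,
				    unsigned int off)
	{
		return config_space[off >> 2];
	}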
@@ -1058,27 +1046,28 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1058 1046
1059 pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register); 1047 pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
1060 1048
1061 // No adapter present 1049 /* No adapter present */
1062 if (temp_register == 0xFFFFFFFF) 1050 if (temp_register == 0xFFFFFFFF)
1063 return(NO_ADAPTER_PRESENT); 1051 return(NO_ADAPTER_PRESENT);
1064 1052
1065 if (temp_register != func->config_space[0]) 1053 if (temp_register != func->config_space[0])
1066 return(ADAPTER_NOT_SAME); 1054 return(ADAPTER_NOT_SAME);
1067 1055
1068 // Check for same revision number and class code 1056 /* Check for same revision number and class code */
1069 pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register); 1057 pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
1070 1058
1071 // Adapter not the same 1059 /* Adapter not the same */
1072 if (temp_register != func->config_space[0x08 >> 2]) 1060 if (temp_register != func->config_space[0x08 >> 2])
1073 return(ADAPTER_NOT_SAME); 1061 return(ADAPTER_NOT_SAME);
1074 1062
1075 // Check for Bridge 1063 /* Check for Bridge */
1076 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 1064 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
1077 1065
1078 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 1066 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1079 // In order to continue checking, we must program the 1067 /* In order to continue checking, we must program the
1080 // bus registers in the bridge to respond to accesses 1068 * bus registers in the bridge to respond to accesses
1081 // for it's subordinate bus(es) 1069 * for its subordinate bus(es)
1070 */
1082 1071
1083 temp_register = func->config_space[0x18 >> 2]; 1072 temp_register = func->config_space[0x18 >> 2];
1084 pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register); 1073 pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1096 } 1085 }
1097 1086
1098 } 1087 }
1099 // Check to see if it is a standard config header 1088 /* Check to see if it is a standard config header */
1100 else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) { 1089 else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
1101 // Check subsystem vendor and ID 1090 /* Check subsystem vendor and ID */
1102 pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register); 1091 pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
1103 1092
1104 if (temp_register != func->config_space[0x2C >> 2]) { 1093 if (temp_register != func->config_space[0x2C >> 2]) {
1105 // If it's a SMART-2 and the register isn't filled 1094 /* If it's a SMART-2 and the register isn't
1106 // in, ignore the difference because 1095 * filled in, ignore the difference because
1107 // they just have an old rev of the firmware 1096 * they just have an old rev of the firmware
1108 1097 */
1109 if (!((func->config_space[0] == 0xAE100E11) 1098 if (!((func->config_space[0] == 0xAE100E11)
1110 && (temp_register == 0x00L))) 1099 && (temp_register == 0x00L)))
1111 return(ADAPTER_NOT_SAME); 1100 return(ADAPTER_NOT_SAME);
1112 } 1101 }
1113 // Figure out IO and memory base lengths 1102 /* Figure out IO and memory base lengths */
1114 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 1103 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
1115 temp_register = 0xFFFFFFFF; 1104 temp_register = 0xFFFFFFFF;
1116 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 1105 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
1117 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 1106 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
1118 if (base) { // If this register is implemented 1107
1108 /* If this register is implemented */
1109 if (base) {
1119 if (base & 0x01L) { 1110 if (base & 0x01L) {
1120 // IO base 1111 /* IO base
1121 // set base = amount of IO space requested 1112 * set base = amount of IO
1113 * space requested
1114 */
1122 base = base & 0xFFFFFFFE; 1115 base = base & 0xFFFFFFFE;
1123 base = (~base) + 1; 1116 base = (~base) + 1;
1124 1117
1125 type = 1; 1118 type = 1;
1126 } else { 1119 } else {
1127 // memory base 1120 /* memory base */
1128 base = base & 0xFFFFFFF0; 1121 base = base & 0xFFFFFFF0;
1129 base = (~base) + 1; 1122 base = (~base) + 1;
1130 1123
@@ -1135,23 +1128,24 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1135 type = 0; 1128 type = 0;
1136 } 1129 }
1137 1130
1138 // Check information in slot structure 1131 /* Check information in slot structure */
1139 if (func->base_length[(cloop - 0x10) >> 2] != base) 1132 if (func->base_length[(cloop - 0x10) >> 2] != base)
1140 return(ADAPTER_NOT_SAME); 1133 return(ADAPTER_NOT_SAME);
1141 1134
1142 if (func->base_type[(cloop - 0x10) >> 2] != type) 1135 if (func->base_type[(cloop - 0x10) >> 2] != type)
1143 return(ADAPTER_NOT_SAME); 1136 return(ADAPTER_NOT_SAME);
1144 1137
1145 } // End of base register loop 1138 } /* End of base register loop */
1146 1139
1147 } // End of (type 0 config space) else 1140 } /* End of (type 0 config space) else */
1148 else { 1141 else {
1149 // this is not a type 0 or 1 config space header so 1142 /* this is not a type 0 or 1 config space header so
1150 // we don't know how to do it 1143 * we don't know how to do it
1144 */
1151 return(DEVICE_TYPE_NOT_SUPPORTED); 1145 return(DEVICE_TYPE_NOT_SUPPORTED);
1152 } 1146 }
1153 1147
1154 // Get the next function 1148 /* Get the next function */
1155 func = cpqhp_slot_find(func->bus, func->device, index++); 1149 func = cpqhp_slot_find(func->bus, func->device, index++);
1156 } 1150 }
1157 1151
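cpqhp_valid_replace() above walks every function in the slot and insists the newcomer matches the saved image: the vendor/device dword, the class/revision dword, the subsystem IDs, and for each BAR both the probed size and the I/O-vs-memory type. The one carve-out is the SMART-2 quirk in the hunk; restated as a hedged sketch with invented names:

	#include <stdint.h>

	/* Tolerate a zero subsystem ID only on a Compaq SMART-2
	 * (vendor/device dword 0xAE100E11) whose old firmware never
	 * fills the register in; anything else is a different board.
	 */
	static int subsys_matches(uint32_t saved_id, uint32_t saved_subsys,
				  uint32_t probed_subsys)
	{
		if (probed_subsys == saved_subsys)
			return 1;
		return saved_id == 0xAE100E11 && probed_subsys == 0;
	}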
@@ -1168,7 +1162,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1168 * this function is for hot plug ADD! 1162 * this function is for hot plug ADD!
1169 * 1163 *
1170 * returns 0 if success 1164 * returns 0 if success
1171 */ 1165 */
1172int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start) 1166int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
1173{ 1167{
1174 u8 temp; 1168 u8 temp;
@@ -1187,10 +1181,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1187 rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff); 1181 rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
1188 dbg("rom_resource_table = %p\n", rom_resource_table); 1182 dbg("rom_resource_table = %p\n", rom_resource_table);
1189 1183
1190 if (rom_resource_table == NULL) { 1184 if (rom_resource_table == NULL)
1191 return -ENODEV; 1185 return -ENODEV;
1192 } 1186
1193 // Sum all resources and setup resource maps 1187 /* Sum all resources and setup resource maps */
1194 unused_IRQ = readl(rom_resource_table + UNUSED_IRQ); 1188 unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
1195 dbg("unused_IRQ = %x\n", unused_IRQ); 1189 dbg("unused_IRQ = %x\n", unused_IRQ);
1196 1190
@@ -1222,13 +1216,11 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1222 1216
1223 temp = 0; 1217 temp = 0;
1224 1218
1225 if (!cpqhp_nic_irq) { 1219 if (!cpqhp_nic_irq)
1226 cpqhp_nic_irq = ctrl->cfgspc_irq; 1220 cpqhp_nic_irq = ctrl->cfgspc_irq;
1227 }
1228 1221
1229 if (!cpqhp_disk_irq) { 1222 if (!cpqhp_disk_irq)
1230 cpqhp_disk_irq = ctrl->cfgspc_irq; 1223 cpqhp_disk_irq = ctrl->cfgspc_irq;
1231 }
1232 1224
1233 dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq); 1225 dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
1234 1226
@@ -1262,13 +1254,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1262 dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length, 1254 dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
1263 primary_bus, secondary_bus, max_bus); 1255 primary_bus, secondary_bus, max_bus);
1264 1256
1265 // If this entry isn't for our controller's bus, ignore it 1257 /* If this entry isn't for our controller's bus, ignore it */
1266 if (primary_bus != ctrl->bus) { 1258 if (primary_bus != ctrl->bus) {
1267 i--; 1259 i--;
1268 one_slot += sizeof (struct slot_rt); 1260 one_slot += sizeof (struct slot_rt);
1269 continue; 1261 continue;
1270 } 1262 }
1271 // find out if this entry is for an occupied slot 1263 /* find out if this entry is for an occupied slot */
1272 ctrl->pci_bus->number = primary_bus; 1264 ctrl->pci_bus->number = primary_bus;
1273 pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword); 1265 pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
1274 dbg("temp_D_word = %x\n", temp_dword); 1266 dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1282 func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++); 1274 func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
1283 } 1275 }
1284 1276
1285 // If we can't find a match, skip this table entry 1277 /* If we can't find a match, skip this table entry */
1286 if (!func) { 1278 if (!func) {
1287 i--; 1279 i--;
1288 one_slot += sizeof (struct slot_rt); 1280 one_slot += sizeof (struct slot_rt);
1289 continue; 1281 continue;
1290 } 1282 }
1291 // this may not work and shouldn't be used 1283 /* this may not work and shouldn't be used */
1292 if (secondary_bus != primary_bus) 1284 if (secondary_bus != primary_bus)
1293 bridged_slot = 1; 1285 bridged_slot = 1;
1294 else 1286 else
@@ -1301,7 +1293,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1301 } 1293 }
1302 1294
1303 1295
1304 // If we've got a valid IO base, use it 1296 /* If we've got a valid IO base, use it */
1305 1297
1306 temp_dword = io_base + io_length; 1298 temp_dword = io_base + io_length;
1307 1299
@@ -1325,7 +1317,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1325 } 1317 }
1326 } 1318 }
1327 1319
1328 // If we've got a valid memory base, use it 1320 /* If we've got a valid memory base, use it */
1329 temp_dword = mem_base + mem_length; 1321 temp_dword = mem_base + mem_length;
1330 if ((mem_base) && (temp_dword < 0x10000)) { 1322 if ((mem_base) && (temp_dword < 0x10000)) {
1331 mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); 1323 mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1348 } 1340 }
1349 } 1341 }
1350 1342
1351 // If we've got a valid prefetchable memory base, and 1343 /* If we've got a valid prefetchable memory base, and
1352 // the base + length isn't greater than 0xFFFF 1344 * the base + length isn't greater than 0xFFFF
1345 */
1353 temp_dword = pre_mem_base + pre_mem_length; 1346 temp_dword = pre_mem_base + pre_mem_length;
1354 if ((pre_mem_base) && (temp_dword < 0x10000)) { 1347 if ((pre_mem_base) && (temp_dword < 0x10000)) {
1355 p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); 1348 p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1372 } 1365 }
1373 } 1366 }
1374 1367
1375 // If we've got a valid bus number, use it 1368 /* If we've got a valid bus number, use it
1376 // The second condition is to ignore bus numbers on 1369 * The second condition is to ignore bus numbers on
1377 // populated slots that don't have PCI-PCI bridges 1370 * populated slots that don't have PCI-PCI bridges
1371 */
1378 if (secondary_bus && (secondary_bus != primary_bus)) { 1372 if (secondary_bus && (secondary_bus != primary_bus)) {
1379 bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL); 1373 bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
1380 if (!bus_node) 1374 if (!bus_node)
@@ -1398,8 +1392,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1398 one_slot += sizeof (struct slot_rt); 1392 one_slot += sizeof (struct slot_rt);
1399 } 1393 }
1400 1394
1401 // If all of the following fail, we don't have any resources for 1395 /* If all of the following fail, we don't have any resources for
1402 // hot plug add 1396 * hot plug add
1397 */
1403 rc = 1; 1398 rc = 1;
1404 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); 1399 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
1405 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); 1400 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
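cpqhp_find_available_resources() parses the ROM's hot-plug resource table: for each entry on this controller's bus it allocates a node per resource type (I/O, memory, prefetchable memory, bus numbers) and prepends it to the matching list, then a final cpqhp_resource_sort_and_combine() pass per list merges adjacent ranges; only if every pass fails is there nothing left for hot-add. The push itself is a plain singly-linked prepend, sketched here with the field shape cpqhp's resource nodes appear to use (not verified against this tree):

	io_node = kmalloc(sizeof(*io_node), GFP_KERNEL);
	if (!io_node)
		return -ENOMEM;
	io_node->base = io_base;	/* from the table entry */
	io_node->length = io_length;
	io_node->next = ctrl->io_head;	/* prepend to this type's list */
	ctrl->io_head = io_node;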
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 42e4260c3b12..7485ffda950c 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1318,7 +1318,6 @@ error:
1318} 1318}
1319 1319
1320struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { 1320struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
1321 .owner = THIS_MODULE,
1322 .set_attention_status = set_attention_status, 1321 .set_attention_status = set_attention_status,
1323 .enable_slot = enable_slot, 1322 .enable_slot = enable_slot,
1324 .disable_slot = ibmphp_disable_slot, 1323 .disable_slot = ibmphp_disable_slot,
@@ -1421,3 +1420,4 @@ static void __exit ibmphp_exit(void)
1421} 1420}
1422 1421
1423module_init(ibmphp_init); 1422module_init(ibmphp_init);
1423module_exit(ibmphp_exit);
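Two small ibmphp changes ride along: the static .owner = THIS_MODULE initializer goes away (ownership is now filled in centrally at registration time, see the pci_hotplug_core change below), and the driver finally registers its exit handler; without a module_exit() hook the module could not be unloaded at all.

	/* ibmphp defined ibmphp_exit() but never wired it up; the
	 * hunk pairs it with the existing module_init().
	 */
	module_init(ibmphp_init);
	module_exit(ibmphp_exit);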
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0f07f9..844580489d4d 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
347 .store = test_write_file 347 .store = test_write_file
348}; 348};
349 349
350static int has_power_file(struct pci_slot *pci_slot) 350static bool has_power_file(struct pci_slot *pci_slot)
351{ 351{
352 struct hotplug_slot *slot = pci_slot->hotplug; 352 struct hotplug_slot *slot = pci_slot->hotplug;
353 if ((!slot) || (!slot->ops)) 353 if ((!slot) || (!slot->ops))
354 return -ENODEV; 354 return false;
355 if ((slot->ops->enable_slot) || 355 if ((slot->ops->enable_slot) ||
356 (slot->ops->disable_slot) || 356 (slot->ops->disable_slot) ||
357 (slot->ops->get_power_status)) 357 (slot->ops->get_power_status))
358 return 0; 358 return true;
359 return -ENOENT; 359 return false;
360} 360}
361 361
362static int has_attention_file(struct pci_slot *pci_slot) 362static bool has_attention_file(struct pci_slot *pci_slot)
363{ 363{
364 struct hotplug_slot *slot = pci_slot->hotplug; 364 struct hotplug_slot *slot = pci_slot->hotplug;
365 if ((!slot) || (!slot->ops)) 365 if ((!slot) || (!slot->ops))
366 return -ENODEV; 366 return false;
367 if ((slot->ops->set_attention_status) || 367 if ((slot->ops->set_attention_status) ||
368 (slot->ops->get_attention_status)) 368 (slot->ops->get_attention_status))
369 return 0; 369 return true;
370 return -ENOENT; 370 return false;
371} 371}
372 372
373static int has_latch_file(struct pci_slot *pci_slot) 373static bool has_latch_file(struct pci_slot *pci_slot)
374{ 374{
375 struct hotplug_slot *slot = pci_slot->hotplug; 375 struct hotplug_slot *slot = pci_slot->hotplug;
376 if ((!slot) || (!slot->ops)) 376 if ((!slot) || (!slot->ops))
377 return -ENODEV; 377 return false;
378 if (slot->ops->get_latch_status) 378 if (slot->ops->get_latch_status)
379 return 0; 379 return true;
380 return -ENOENT; 380 return false;
381} 381}
382 382
383static int has_adapter_file(struct pci_slot *pci_slot) 383static bool has_adapter_file(struct pci_slot *pci_slot)
384{ 384{
385 struct hotplug_slot *slot = pci_slot->hotplug; 385 struct hotplug_slot *slot = pci_slot->hotplug;
386 if ((!slot) || (!slot->ops)) 386 if ((!slot) || (!slot->ops))
387 return -ENODEV; 387 return false;
388 if (slot->ops->get_adapter_status) 388 if (slot->ops->get_adapter_status)
389 return 0; 389 return true;
390 return -ENOENT; 390 return false;
391} 391}
392 392
393static int has_max_bus_speed_file(struct pci_slot *pci_slot) 393static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
394{ 394{
395 struct hotplug_slot *slot = pci_slot->hotplug; 395 struct hotplug_slot *slot = pci_slot->hotplug;
396 if ((!slot) || (!slot->ops)) 396 if ((!slot) || (!slot->ops))
397 return -ENODEV; 397 return false;
398 if (slot->ops->get_max_bus_speed) 398 if (slot->ops->get_max_bus_speed)
399 return 0; 399 return true;
400 return -ENOENT; 400 return false;
401} 401}
402 402
403static int has_cur_bus_speed_file(struct pci_slot *pci_slot) 403static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
404{ 404{
405 struct hotplug_slot *slot = pci_slot->hotplug; 405 struct hotplug_slot *slot = pci_slot->hotplug;
406 if ((!slot) || (!slot->ops)) 406 if ((!slot) || (!slot->ops))
407 return -ENODEV; 407 return false;
408 if (slot->ops->get_cur_bus_speed) 408 if (slot->ops->get_cur_bus_speed)
409 return 0; 409 return true;
410 return -ENOENT; 410 return false;
411} 411}
412 412
413static int has_test_file(struct pci_slot *pci_slot) 413static bool has_test_file(struct pci_slot *pci_slot)
414{ 414{
415 struct hotplug_slot *slot = pci_slot->hotplug; 415 struct hotplug_slot *slot = pci_slot->hotplug;
416 if ((!slot) || (!slot->ops)) 416 if ((!slot) || (!slot->ops))
417 return -ENODEV; 417 return false;
418 if (slot->ops->hardware_test) 418 if (slot->ops->hardware_test)
419 return 0; 419 return true;
420 return -ENOENT; 420 return false;
421} 421}
422 422
423static int fs_add_slot(struct pci_slot *slot) 423static int fs_add_slot(struct pci_slot *slot)
424{ 424{
425 int retval = 0; 425 int retval = 0;
426 426
427 if (has_power_file(slot) == 0) { 427 /* Create symbolic link to the hotplug driver module */
428 retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr); 428 pci_hp_create_module_link(slot);
429
430 if (has_power_file(slot)) {
431 retval = sysfs_create_file(&slot->kobj,
432 &hotplug_slot_attr_power.attr);
429 if (retval) 433 if (retval)
430 goto exit_power; 434 goto exit_power;
431 } 435 }
432 436
433 if (has_attention_file(slot) == 0) { 437 if (has_attention_file(slot)) {
434 retval = sysfs_create_file(&slot->kobj, 438 retval = sysfs_create_file(&slot->kobj,
435 &hotplug_slot_attr_attention.attr); 439 &hotplug_slot_attr_attention.attr);
436 if (retval) 440 if (retval)
437 goto exit_attention; 441 goto exit_attention;
438 } 442 }
439 443
440 if (has_latch_file(slot) == 0) { 444 if (has_latch_file(slot)) {
441 retval = sysfs_create_file(&slot->kobj, 445 retval = sysfs_create_file(&slot->kobj,
442 &hotplug_slot_attr_latch.attr); 446 &hotplug_slot_attr_latch.attr);
443 if (retval) 447 if (retval)
444 goto exit_latch; 448 goto exit_latch;
445 } 449 }
446 450
447 if (has_adapter_file(slot) == 0) { 451 if (has_adapter_file(slot)) {
448 retval = sysfs_create_file(&slot->kobj, 452 retval = sysfs_create_file(&slot->kobj,
449 &hotplug_slot_attr_presence.attr); 453 &hotplug_slot_attr_presence.attr);
450 if (retval) 454 if (retval)
451 goto exit_adapter; 455 goto exit_adapter;
452 } 456 }
453 457
454 if (has_max_bus_speed_file(slot) == 0) { 458 if (has_max_bus_speed_file(slot)) {
455 retval = sysfs_create_file(&slot->kobj, 459 retval = sysfs_create_file(&slot->kobj,
456 &hotplug_slot_attr_max_bus_speed.attr); 460 &hotplug_slot_attr_max_bus_speed.attr);
457 if (retval) 461 if (retval)
458 goto exit_max_speed; 462 goto exit_max_speed;
459 } 463 }
460 464
461 if (has_cur_bus_speed_file(slot) == 0) { 465 if (has_cur_bus_speed_file(slot)) {
462 retval = sysfs_create_file(&slot->kobj, 466 retval = sysfs_create_file(&slot->kobj,
463 &hotplug_slot_attr_cur_bus_speed.attr); 467 &hotplug_slot_attr_cur_bus_speed.attr);
464 if (retval) 468 if (retval)
465 goto exit_cur_speed; 469 goto exit_cur_speed;
466 } 470 }
467 471
468 if (has_test_file(slot) == 0) { 472 if (has_test_file(slot)) {
469 retval = sysfs_create_file(&slot->kobj, 473 retval = sysfs_create_file(&slot->kobj,
470 &hotplug_slot_attr_test.attr); 474 &hotplug_slot_attr_test.attr);
471 if (retval) 475 if (retval)
@@ -475,55 +479,61 @@ static int fs_add_slot(struct pci_slot *slot)
475 goto exit; 479 goto exit;
476 480
477exit_test: 481exit_test:
478 if (has_cur_bus_speed_file(slot) == 0) 482 if (has_cur_bus_speed_file(slot))
479 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr); 483 sysfs_remove_file(&slot->kobj,
480 484 &hotplug_slot_attr_cur_bus_speed.attr);
481exit_cur_speed: 485exit_cur_speed:
482 if (has_max_bus_speed_file(slot) == 0) 486 if (has_max_bus_speed_file(slot))
483 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 487 sysfs_remove_file(&slot->kobj,
484 488 &hotplug_slot_attr_max_bus_speed.attr);
485exit_max_speed: 489exit_max_speed:
486 if (has_adapter_file(slot) == 0) 490 if (has_adapter_file(slot))
487 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 491 sysfs_remove_file(&slot->kobj,
488 492 &hotplug_slot_attr_presence.attr);
489exit_adapter: 493exit_adapter:
490 if (has_latch_file(slot) == 0) 494 if (has_latch_file(slot))
491 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr); 495 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
492
493exit_latch: 496exit_latch:
494 if (has_attention_file(slot) == 0) 497 if (has_attention_file(slot))
495 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr); 498 sysfs_remove_file(&slot->kobj,
496 499 &hotplug_slot_attr_attention.attr);
497exit_attention: 500exit_attention:
498 if (has_power_file(slot) == 0) 501 if (has_power_file(slot))
499 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 502 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
500exit_power: 503exit_power:
504 pci_hp_remove_module_link(slot);
501exit: 505exit:
502 return retval; 506 return retval;
503} 507}
504 508
505static void fs_remove_slot(struct pci_slot *slot) 509static void fs_remove_slot(struct pci_slot *slot)
506{ 510{
507 if (has_power_file(slot) == 0) 511 if (has_power_file(slot))
508 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 512 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
509 513
510 if (has_attention_file(slot) == 0) 514 if (has_attention_file(slot))
511 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr); 515 sysfs_remove_file(&slot->kobj,
516 &hotplug_slot_attr_attention.attr);
512 517
513 if (has_latch_file(slot) == 0) 518 if (has_latch_file(slot))
514 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr); 519 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
515 520
516 if (has_adapter_file(slot) == 0) 521 if (has_adapter_file(slot))
517 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 522 sysfs_remove_file(&slot->kobj,
523 &hotplug_slot_attr_presence.attr);
518 524
519 if (has_max_bus_speed_file(slot) == 0) 525 if (has_max_bus_speed_file(slot))
520 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 526 sysfs_remove_file(&slot->kobj,
527 &hotplug_slot_attr_max_bus_speed.attr);
521 528
522 if (has_cur_bus_speed_file(slot) == 0) 529 if (has_cur_bus_speed_file(slot))
523 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr); 530 sysfs_remove_file(&slot->kobj,
531 &hotplug_slot_attr_cur_bus_speed.attr);
524 532
525 if (has_test_file(slot) == 0) 533 if (has_test_file(slot))
526 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); 534 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
535
536 pci_hp_remove_module_link(slot);
527} 537}
528 538
529static struct hotplug_slot *get_slot_from_name (const char *name) 539static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
540} 550}
541 551
542/** 552/**
543 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem 553 * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
544 * @bus: bus this slot is on 554 * @bus: bus this slot is on
545 * @slot: pointer to the &struct hotplug_slot to register 555 * @slot: pointer to the &struct hotplug_slot to register
546 * @slot_nr: slot number 556 * @devnr: device number
547 * @name: name registered with kobject core 557 * @name: name registered with kobject core
548 * 558 *
549 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 559 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
551 * 561 *
552 * Returns 0 if successful, anything else for an error. 562 * Returns 0 if successful, anything else for an error.
553 */ 563 */
554int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr, 564int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
555 const char *name) 565 int devnr, const char *name,
566 struct module *owner, const char *mod_name)
556{ 567{
557 int result; 568 int result;
558 struct pci_slot *pci_slot; 569 struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
567 return -EINVAL; 578 return -EINVAL;
568 } 579 }
569 580
570 mutex_lock(&pci_hp_mutex); 581 slot->ops->owner = owner;
582 slot->ops->mod_name = mod_name;
571 583
584 mutex_lock(&pci_hp_mutex);
572 /* 585 /*
573 * No problems if we call this interface from both ACPI_PCI_SLOT 586 * No problems if we call this interface from both ACPI_PCI_SLOT
574 * driver and call it here again. If we've already created the 587 * driver and call it here again. If we've already created the
575 * pci_slot, the interface will simply bump the refcount. 588 * pci_slot, the interface will simply bump the refcount.
576 */ 589 */
577 pci_slot = pci_create_slot(bus, slot_nr, name, slot); 590 pci_slot = pci_create_slot(bus, devnr, name, slot);
578 if (IS_ERR(pci_slot)) { 591 if (IS_ERR(pci_slot)) {
579 result = PTR_ERR(pci_slot); 592 result = PTR_ERR(pci_slot);
580 goto out; 593 goto out;
@@ -684,6 +697,6 @@ MODULE_LICENSE("GPL");
684module_param(debug, bool, 0644); 697module_param(debug, bool, 0644);
685MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 698MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
686 699
687EXPORT_SYMBOL_GPL(pci_hp_register); 700EXPORT_SYMBOL_GPL(__pci_hp_register);
688EXPORT_SYMBOL_GPL(pci_hp_deregister); 701EXPORT_SYMBOL_GPL(pci_hp_deregister);
689EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); 702EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
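The pci_hotplug_core rework above is the hub of this series: the has_*_file() helpers become honest bools, fs_add_slot()/fs_remove_slot() gain a sysfs symlink to the owning driver's module via pci_hp_create_module_link()/pci_hp_remove_module_link(), and pci_hp_register() becomes __pci_hp_register() taking the caller's struct module and name so the core can set slot->ops->owner and ->mod_name itself. That is what lets every driver in this diff drop its .owner = THIS_MODULE line. Since only __pci_hp_register is exported now, call sites presumably keep compiling through a header wrapper along these lines (the header is not part of this diff, so treat the sketch as an assumption):

	#define pci_hp_register(slot, bus, devnr, name) \
		__pci_hp_register(slot, bus, devnr, name, THIS_MODULE, \
				  KBUILD_MODNAME)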
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 0a368547e633..e6cf096498be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -81,7 +81,6 @@ struct slot {
81 struct hpc_ops *hpc_ops; 81 struct hpc_ops *hpc_ops;
82 struct hotplug_slot *hotplug_slot; 82 struct hotplug_slot *hotplug_slot;
83 struct list_head slot_list; 83 struct list_head slot_list;
84 unsigned long last_emi_toggle;
85 struct delayed_work work; /* work for button event */ 84 struct delayed_work work; /* work for button event */
86 struct mutex lock; 85 struct mutex lock;
87}; 86};
@@ -203,8 +202,6 @@ struct hpc_ops {
203 int (*set_attention_status)(struct slot *slot, u8 status); 202 int (*set_attention_status)(struct slot *slot, u8 status);
204 int (*get_latch_status)(struct slot *slot, u8 *status); 203 int (*get_latch_status)(struct slot *slot, u8 *status);
205 int (*get_adapter_status)(struct slot *slot, u8 *status); 204 int (*get_adapter_status)(struct slot *slot, u8 *status);
206 int (*get_emi_status)(struct slot *slot, u8 *status);
207 int (*toggle_emi)(struct slot *slot);
208 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 205 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
209 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 206 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
210 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val); 207 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fb254b2454de..2317557fdee6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
74 74
75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { 75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
76 .owner = THIS_MODULE,
77 .set_attention_status = set_attention_status, 76 .set_attention_status = set_attention_status,
78 .enable_slot = enable_slot, 77 .enable_slot = enable_slot,
79 .disable_slot = disable_slot, 78 .disable_slot = disable_slot,
@@ -85,99 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
85 .get_cur_bus_speed = get_cur_bus_speed, 84 .get_cur_bus_speed = get_cur_bus_speed,
86}; 85};
87 86
88/*
89 * Check the status of the Electro Mechanical Interlock (EMI)
90 */
91static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
92{
93 struct slot *slot = hotplug_slot->private;
94 return (slot->hpc_ops->get_emi_status(slot, value));
95}
96
97/*
98 * sysfs interface for the Electro Mechanical Interlock (EMI)
99 * 1 == locked, 0 == unlocked
100 */
101static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
102{
103 int retval;
104 u8 value;
105
106 retval = get_lock_status(slot, &value);
107 if (retval)
108 goto lock_read_exit;
109 retval = sprintf (buf, "%d\n", value);
110
111lock_read_exit:
112 return retval;
113}
114
115/*
116 * Change the status of the Electro Mechanical Interlock (EMI)
117 * This is a toggle - in addition there must be at least 1 second
118 * in between toggles.
119 */
120static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
121{
122 struct slot *slot = hotplug_slot->private;
123 int retval;
124 u8 value;
125
126 mutex_lock(&slot->ctrl->crit_sect);
127
128 /* has it been >1 sec since our last toggle? */
129 if ((get_seconds() - slot->last_emi_toggle) < 1) {
130 mutex_unlock(&slot->ctrl->crit_sect);
131 return -EINVAL;
132 }
133
134 /* see what our current state is */
135 retval = get_lock_status(hotplug_slot, &value);
136 if (retval || (value == status))
137 goto set_lock_exit;
138
139 slot->hpc_ops->toggle_emi(slot);
140set_lock_exit:
141 mutex_unlock(&slot->ctrl->crit_sect);
142 return 0;
143}
144
145/*
146 * sysfs interface which allows the user to toggle the Electro Mechanical
147 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
148 */
149static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
150 const char *buf, size_t count)
151{
152 struct slot *slot = hotplug_slot->private;
153 unsigned long llock;
154 u8 lock;
155 int retval = 0;
156
157 llock = simple_strtoul(buf, NULL, 10);
158 lock = (u8)(llock & 0xff);
159
160 switch (lock) {
161 case 0:
162 case 1:
163 retval = set_lock_status(hotplug_slot, lock);
164 break;
165 default:
166 ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
167 lock);
168 retval = -EINVAL;
169 }
170 if (retval)
171 return retval;
172 return count;
173}
174
175static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
176 .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
177 .show = lock_read_file,
178 .store = lock_write_file
179};
180
181/** 87/**
182 * release_slot - free up the memory used by a slot 88 * release_slot - free up the memory used by a slot
183 * @hotplug_slot: slot to free 89 * @hotplug_slot: slot to free
@@ -236,17 +142,6 @@ static int init_slots(struct controller *ctrl)
236 get_attention_status(hotplug_slot, &info->attention_status); 142 get_attention_status(hotplug_slot, &info->attention_status);
237 get_latch_status(hotplug_slot, &info->latch_status); 143 get_latch_status(hotplug_slot, &info->latch_status);
238 get_adapter_status(hotplug_slot, &info->adapter_status); 144 get_adapter_status(hotplug_slot, &info->adapter_status);
239 /* create additional sysfs entries */
240 if (EMI(ctrl)) {
241 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
242 &hotplug_slot_attr_lock.attr);
243 if (retval) {
244 pci_hp_deregister(hotplug_slot);
245 ctrl_err(ctrl, "Cannot create additional sysfs "
246 "entries\n");
247 goto error_info;
248 }
249 }
250 } 145 }
251 146
252 return 0; 147 return 0;
@@ -261,13 +156,8 @@ error:
261static void cleanup_slots(struct controller *ctrl) 156static void cleanup_slots(struct controller *ctrl)
262{ 157{
263 struct slot *slot; 158 struct slot *slot;
264 159 list_for_each_entry(slot, &ctrl->slot_list, slot_list)
265 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
266 if (EMI(ctrl))
267 sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
268 &hotplug_slot_attr_lock.attr);
269 pci_hp_deregister(slot->hotplug_slot); 160 pci_hp_deregister(slot->hotplug_slot);
270 }
271} 161}
272 162
273/* 163/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 07bd32151146..52813257e5bf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@ static int hpc_query_power_fault(struct slot *slot)
422 return !!(slot_status & PCI_EXP_SLTSTA_PFD); 422 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
423} 423}
424 424
425static int hpc_get_emi_status(struct slot *slot, u8 *status)
426{
427 struct controller *ctrl = slot->ctrl;
428 u16 slot_status;
429 int retval;
430
431 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
432 if (retval) {
433 ctrl_err(ctrl, "Cannot check EMI status\n");
434 return retval;
435 }
436 *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
437 return retval;
438}
439
440static int hpc_toggle_emi(struct slot *slot)
441{
442 u16 slot_cmd;
443 u16 cmd_mask;
444 int rc;
445
446 slot_cmd = PCI_EXP_SLTCTL_EIC;
447 cmd_mask = PCI_EXP_SLTCTL_EIC;
448 rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
449 slot->last_emi_toggle = get_seconds();
450
451 return rc;
452}
453
454static int hpc_set_attention_status(struct slot *slot, u8 value) 425static int hpc_set_attention_status(struct slot *slot, u8 value)
455{ 426{
456 struct controller *ctrl = slot->ctrl; 427 struct controller *ctrl = slot->ctrl;
@@ -874,8 +845,6 @@ static struct hpc_ops pciehp_hpc_ops = {
874 .get_attention_status = hpc_get_attention_status, 845 .get_attention_status = hpc_get_attention_status,
875 .get_latch_status = hpc_get_latch_status, 846 .get_latch_status = hpc_get_latch_status,
876 .get_adapter_status = hpc_get_adapter_status, 847 .get_adapter_status = hpc_get_adapter_status,
877 .get_emi_status = hpc_get_emi_status,
878 .toggle_emi = hpc_toggle_emi,
879 848
880 .get_max_bus_speed = hpc_get_max_lnk_speed, 849 .get_max_bus_speed = hpc_get_max_lnk_speed,
881 .get_cur_bus_speed = hpc_get_cur_lnk_speed, 850 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
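
hpc_toggle_emi() above handed pcie_write_cmd() an identical command and mask, a read-modify-write that pulses only the EIC bit of Slot Control. The idiom, sketched standalone (the starting register value is invented; 0x0800 is PCI_EXP_SLTCTL_EIC per the PCIe spec):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_SLTCTL_EIC 0x0800	/* Electromechanical Interlock Control */

/* Change only the bits selected by mask, preserving the rest. */
static uint16_t write_cmd(uint16_t reg, uint16_t cmd, uint16_t mask)
{
	return (reg & ~mask) | (cmd & mask);
}

int main(void)
{
	uint16_t sltctl = 0x05c0;	/* arbitrary starting value */

	sltctl = write_cmd(sltctl, PCI_EXP_SLTCTL_EIC, PCI_EXP_SLTCTL_EIC);
	printf("SLTCTL = %#06x\n", sltctl);	/* 0x0dc0: only EIC changed */
	return 0;
}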
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf9e89f..5175d9b26f0b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
82static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 82static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
83 83
84static struct hotplug_slot_ops skel_hotplug_slot_ops = { 84static struct hotplug_slot_ops skel_hotplug_slot_ops = {
85 .owner = THIS_MODULE,
86 .enable_slot = enable_slot, 85 .enable_slot = enable_slot,
87 .disable_slot = disable_slot, 86 .disable_slot = disable_slot,
88 .set_attention_status = set_attention_status, 87 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a08fdc7..c159223389ec 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
423} 423}
424 424
425struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { 425struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
426 .owner = THIS_MODULE,
427 .enable_slot = enable_slot, 426 .enable_slot = enable_slot,
428 .disable_slot = disable_slot, 427 .disable_slot = disable_slot,
429 .set_attention_status = set_attention_status, 428 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2d6da78fddb6..a4494d78e7c2 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@ static int disable_slot(struct hotplug_slot *slot);
83static inline int get_power_status(struct hotplug_slot *slot, u8 *value); 83static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
84 84
85static struct hotplug_slot_ops sn_hotplug_slot_ops = { 85static struct hotplug_slot_ops sn_hotplug_slot_ops = {
86 .owner = THIS_MODULE,
87 .enable_slot = enable_slot, 86 .enable_slot = enable_slot,
88 .disable_slot = disable_slot, 87 .disable_slot = disable_slot,
89 .get_power_status = get_power_status, 88 .get_power_status = get_power_status,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149c2293..8a520a3d0f59 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
69static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 69static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
70 70
71static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { 71static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
72 .owner = THIS_MODULE,
73 .set_attention_status = set_attention_status, 72 .set_attention_status = set_attention_status,
74 .enable_slot = enable_slot, 73 .enable_slot = enable_slot,
75 .disable_slot = disable_slot, 74 .disable_slot = disable_slot,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cd389162735f..420afa887283 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -53,6 +53,8 @@
53 53
54#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 54#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
55 55
56#define MAX_AGAW_WIDTH 64
57
56#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 58#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
57 59
58#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 60#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
@@ -131,8 +133,6 @@ static inline void context_set_fault_enable(struct context_entry *context)
131 context->lo &= (((u64)-1) << 2) | 1; 133 context->lo &= (((u64)-1) << 2) | 1;
132} 134}
133 135
134#define CONTEXT_TT_MULTI_LEVEL 0
135
136static inline void context_set_translation_type(struct context_entry *context, 136static inline void context_set_translation_type(struct context_entry *context,
137 unsigned long value) 137 unsigned long value)
138{ 138{
@@ -217,6 +217,14 @@ static inline bool dma_pte_present(struct dma_pte *pte)
217 return (pte->val & 3) != 0; 217 return (pte->val & 3) != 0;
218} 218}
219 219
220/*
221 * This domain is a static identity mapping domain.
222 * 1. This domain creates a static 1:1 mapping to all usable memory.
223 * 2. It maps to each iommu if successful.
224 * 3. Each iommu maps to this domain if successful.
225 */
226struct dmar_domain *si_domain;
227
220/* devices under the same p2p bridge are owned in one domain */ 228/* devices under the same p2p bridge are owned in one domain */
221#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) 229#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
222 230
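
The point of the new flag is one shared domain whose page tables make bus address equal physical address across all usable RAM. A toy illustration of that invariant (the region values are invented; the kernel walks the real memory map):

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t start, end; };

/* Mock map of usable memory ranges. */
static const struct region usable[] = {
	{ 0x00100000ULL, 0x40000000ULL },
	{ 0x100000000ULL, 0x140000000ULL },
};

int main(void)
{
	size_t i;

	/* A static identity (1:1) domain maps every usable page to itself,
	 * so DMA address == physical address within these ranges. */
	for (i = 0; i < sizeof(usable) / sizeof(usable[0]); i++)
		printf("map [%#llx, %#llx) 1:1\n",
		       (unsigned long long)usable[i].start,
		       (unsigned long long)usable[i].end);
	return 0;
}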
@@ -225,6 +233,9 @@ static inline bool dma_pte_present(struct dma_pte *pte)
225 */ 233 */
226#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) 234#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
227 235
236/* si_domain contains multiple devices */
237#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
238
228struct dmar_domain { 239struct dmar_domain {
229 int id; /* domain id */ 240 int id; /* domain id */
230 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ 241 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
@@ -256,6 +267,7 @@ struct device_domain_info {
256 u8 bus; /* PCI bus number */ 267 u8 bus; /* PCI bus number */
257 u8 devfn; /* PCI devfn number */ 268 u8 devfn; /* PCI devfn number */
258 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ 269 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
270 struct intel_iommu *iommu; /* IOMMU used by this device */
259 struct dmar_domain *domain; /* pointer to domain */ 271 struct dmar_domain *domain; /* pointer to domain */
260}; 272};
261 273
@@ -401,17 +413,13 @@ void free_iova_mem(struct iova *iova)
401 413
402static inline int width_to_agaw(int width); 414static inline int width_to_agaw(int width);
403 415
404/* calculate agaw for each iommu. 416static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
405 * "SAGAW" may be different across iommus, use a default agaw, and
406 * get a supported less agaw for iommus that don't support the default agaw.
407 */
408int iommu_calculate_agaw(struct intel_iommu *iommu)
409{ 417{
410 unsigned long sagaw; 418 unsigned long sagaw;
411 int agaw = -1; 419 int agaw = -1;
412 420
413 sagaw = cap_sagaw(iommu->cap); 421 sagaw = cap_sagaw(iommu->cap);
414 for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH); 422 for (agaw = width_to_agaw(max_gaw);
415 agaw >= 0; agaw--) { 423 agaw >= 0; agaw--) {
416 if (test_bit(agaw, &sagaw)) 424 if (test_bit(agaw, &sagaw))
417 break; 425 break;
@@ -420,12 +428,32 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
420 return agaw; 428 return agaw;
421} 429}
422 430
423/* in native case, each domain is related to only one iommu */ 431/*
432 * Calculate max SAGAW for each iommu.
433 */
434int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
435{
436 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
437}
438
439/*
440 * Calculate agaw for each iommu.
441 * "SAGAW" may differ across iommus: use a default agaw, and fall back
442 * to a smaller supported agaw for iommus that don't support the default.
443 */
444int iommu_calculate_agaw(struct intel_iommu *iommu)
445{
446 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
447}
448
449/* This function only returns a single iommu in a domain */
424static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) 450static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
425{ 451{
426 int iommu_id; 452 int iommu_id;
427 453
454 /* si_domain and vm domain should not get here. */
428 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); 455 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
456 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
429 457
430 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); 458 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
431 if (iommu_id < 0 || iommu_id >= g_num_of_iommus) 459 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
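
The refactor above lets one loop serve both callers: start from the requested width, then walk down the SAGAW bitmap to the widest AGAW the unit actually supports. A standalone sketch, assuming the usual 30 + 9n width encoding (the sample bitmap is invented):

#include <stdio.h>

#define LEVEL_STRIDE 9

static int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

/* Walk down from the requested width to a supported AGAW, as in
 * __iommu_calculate_agaw() above. */
static int calculate_agaw(unsigned long sagaw, int max_gaw)
{
	int agaw;

	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1UL << agaw))
			break;
	return agaw;
}

int main(void)
{
	/* hypothetical unit supporting 39- and 48-bit AGAWs (bits 1 and 2) */
	unsigned long sagaw = (1UL << 1) | (1UL << 2);

	printf("48-bit default -> agaw %d\n", calculate_agaw(sagaw, 48)); /* 2 */
	printf("64-bit maximum -> agaw %d\n", calculate_agaw(sagaw, 64)); /* 2 */
	return 0;
}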
@@ -809,7 +837,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
809static void iommu_set_root_entry(struct intel_iommu *iommu) 837static void iommu_set_root_entry(struct intel_iommu *iommu)
810{ 838{
811 void *addr; 839 void *addr;
812 u32 cmd, sts; 840 u32 sts;
813 unsigned long flag; 841 unsigned long flag;
814 842
815 addr = iommu->root_entry; 843 addr = iommu->root_entry;
@@ -817,12 +845,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
817 spin_lock_irqsave(&iommu->register_lock, flag); 845 spin_lock_irqsave(&iommu->register_lock, flag);
818 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); 846 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
819 847
820 cmd = iommu->gcmd | DMA_GCMD_SRTP; 848 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
821 writel(cmd, iommu->reg + DMAR_GCMD_REG);
822 849
823 /* Make sure hardware completes it */ 850 /* Make sure hardware completes it */
824 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 851 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
825 readl, (sts & DMA_GSTS_RTPS), sts); 852 readl, (sts & DMA_GSTS_RTPS), sts);
826 853
827 spin_unlock_irqrestore(&iommu->register_lock, flag); 854 spin_unlock_irqrestore(&iommu->register_lock, flag);
828} 855}
@@ -834,39 +861,25 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
834 861
835 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) 862 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
836 return; 863 return;
837 val = iommu->gcmd | DMA_GCMD_WBF;
838 864
839 spin_lock_irqsave(&iommu->register_lock, flag); 865 spin_lock_irqsave(&iommu->register_lock, flag);
840 writel(val, iommu->reg + DMAR_GCMD_REG); 866 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
841 867
842 /* Make sure hardware completes it */ 868 /* Make sure hardware completes it */
843 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 869 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
844 readl, (!(val & DMA_GSTS_WBFS)), val); 870 readl, (!(val & DMA_GSTS_WBFS)), val);
845 871
846 spin_unlock_irqrestore(&iommu->register_lock, flag); 872 spin_unlock_irqrestore(&iommu->register_lock, flag);
847} 873}
848 874
849/* return value determines if we need a write buffer flush */ 875static void __iommu_flush_context(struct intel_iommu *iommu,
850static int __iommu_flush_context(struct intel_iommu *iommu, 876static void __iommu_flush_context(struct intel_iommu *iommu,
851 u16 did, u16 source_id, u8 function_mask, u64 type, 877 u16 did, u16 source_id, u8 function_mask,
852 int non_present_entry_flush) 878 u64 type)
853{ 879{
854 u64 val = 0; 880 u64 val = 0;
855 unsigned long flag; 881 unsigned long flag;
856 882
857 /*
858 * In the non-present entry flush case, if hardware doesn't cache
859 * non-present entry we do nothing and if hardware cache non-present
860 * entry, we flush entries of domain 0 (the domain id is used to cache
861 * any non-present entries)
862 */
863 if (non_present_entry_flush) {
864 if (!cap_caching_mode(iommu->cap))
865 return 1;
866 else
867 did = 0;
868 }
869
870 switch (type) { 883 switch (type) {
871 case DMA_CCMD_GLOBAL_INVL: 884 case DMA_CCMD_GLOBAL_INVL:
872 val = DMA_CCMD_GLOBAL_INVL; 885 val = DMA_CCMD_GLOBAL_INVL;
@@ -891,33 +904,16 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
891 dmar_readq, (!(val & DMA_CCMD_ICC)), val); 904 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
892 905
893 spin_unlock_irqrestore(&iommu->register_lock, flag); 906 spin_unlock_irqrestore(&iommu->register_lock, flag);
894
895 /* flush context entry will implicitly flush write buffer */
896 return 0;
897} 907}
898 908
899/* return value determines if we need a write buffer flush */ 909static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
900static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, 910static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
901 u64 addr, unsigned int size_order, u64 type, 911 u64 addr, unsigned int size_order, u64 type)
902 int non_present_entry_flush)
903{ 912{
904 int tlb_offset = ecap_iotlb_offset(iommu->ecap); 913 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
905 u64 val = 0, val_iva = 0; 914 u64 val = 0, val_iva = 0;
906 unsigned long flag; 915 unsigned long flag;
907 916
908 /*
909 * In the non-present entry flush case, if hardware doesn't cache
910 * non-present entry we do nothing and if hardware cache non-present
911 * entry, we flush entries of domain 0 (the domain id is used to cache
912 * any non-present entries)
913 */
914 if (non_present_entry_flush) {
915 if (!cap_caching_mode(iommu->cap))
916 return 1;
917 else
918 did = 0;
919 }
920
921 switch (type) { 917 switch (type) {
922 case DMA_TLB_GLOBAL_FLUSH: 918 case DMA_TLB_GLOBAL_FLUSH:
923 /* global flush doesn't need set IVA_REG */ 919 /* global flush doesn't need set IVA_REG */
@@ -965,37 +961,101 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
965 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", 961 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
966 (unsigned long long)DMA_TLB_IIRG(type), 962 (unsigned long long)DMA_TLB_IIRG(type),
967 (unsigned long long)DMA_TLB_IAIG(val)); 963 (unsigned long long)DMA_TLB_IAIG(val));
968 /* flush iotlb entry will implicitly flush write buffer */
969 return 0;
970} 964}
971 965
972static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 966static struct device_domain_info *iommu_support_dev_iotlb(
973 u64 addr, unsigned int pages, int non_present_entry_flush) 967 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
968{
969 int found = 0;
970 unsigned long flags;
971 struct device_domain_info *info;
972 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
973
974 if (!ecap_dev_iotlb_support(iommu->ecap))
975 return NULL;
976
977 if (!iommu->qi)
978 return NULL;
979
980 spin_lock_irqsave(&device_domain_lock, flags);
981 list_for_each_entry(info, &domain->devices, link)
982 if (info->bus == bus && info->devfn == devfn) {
983 found = 1;
984 break;
985 }
986 spin_unlock_irqrestore(&device_domain_lock, flags);
987
988 if (!found || !info->dev)
989 return NULL;
990
991 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
992 return NULL;
993
994 if (!dmar_find_matched_atsr_unit(info->dev))
995 return NULL;
996
997 info->iommu = iommu;
998
999 return info;
1000}
1001
1002static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1003{
1004 if (!info)
1005 return;
1006
1007 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1008}
1009
1010static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1011{
1012 if (!info->dev || !pci_ats_enabled(info->dev))
1013 return;
1014
1015 pci_disable_ats(info->dev);
1016}
1017
1018static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1019 u64 addr, unsigned mask)
1020{
1021 u16 sid, qdep;
1022 unsigned long flags;
1023 struct device_domain_info *info;
1024
1025 spin_lock_irqsave(&device_domain_lock, flags);
1026 list_for_each_entry(info, &domain->devices, link) {
1027 if (!info->dev || !pci_ats_enabled(info->dev))
1028 continue;
1029
1030 sid = info->bus << 8 | info->devfn;
1031 qdep = pci_ats_queue_depth(info->dev);
1032 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1033 }
1034 spin_unlock_irqrestore(&device_domain_lock, flags);
1035}
1036
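
The source id handed to qi_flush_dev_iotlb() above is simply the PCI requester id, bus<<8|devfn. A quick sketch of the packing (bus, slot and function values invented):

#include <stdio.h>
#include <stdint.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	uint8_t bus = 0x03, devfn = PCI_DEVFN(0x02, 1);
	uint16_t sid = (uint16_t)bus << 8 | devfn;

	printf("sid = %#06x (bus %02x, dev %02x, fn %x)\n",
	       sid, bus, devfn >> 3, devfn & 7);	/* sid = 0x0311 */
	return 0;
}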
1037static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1038 u64 addr, unsigned int pages)
974{ 1039{
975 unsigned int mask; 1040 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
976 1041
977 BUG_ON(addr & (~VTD_PAGE_MASK)); 1042 BUG_ON(addr & (~VTD_PAGE_MASK));
978 BUG_ON(pages == 0); 1043 BUG_ON(pages == 0);
979 1044
980 /* Fallback to domain selective flush if no PSI support */
981 if (!cap_pgsel_inv(iommu->cap))
982 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
983 DMA_TLB_DSI_FLUSH,
984 non_present_entry_flush);
985
986 /* 1045 /*
1046 * Fall back to domain selective flush if PSI is unsupported or the
1047 * size is too big.
987 * PSI requires page size to be 2 ^ x, and the base address is naturally 1048 * PSI requires page size to be 2 ^ x, and the base address is naturally
988 * aligned to the size 1049 * aligned to the size
989 */ 1050 */
990 mask = ilog2(__roundup_pow_of_two(pages)); 1051 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
991 /* Fallback to domain selective flush if size is too big */ 1052 iommu->flush.flush_iotlb(iommu, did, 0, 0,
992 if (mask > cap_max_amask_val(iommu->cap)) 1053 DMA_TLB_DSI_FLUSH);
993 return iommu->flush.flush_iotlb(iommu, did, 0, 0, 1054 else
994 DMA_TLB_DSI_FLUSH, non_present_entry_flush); 1055 iommu->flush.flush_iotlb(iommu, did, addr, mask,
995 1056 DMA_TLB_PSI_FLUSH);
996 return iommu->flush.flush_iotlb(iommu, did, addr, mask, 1057 if (did)
997 DMA_TLB_PSI_FLUSH, 1058 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
998 non_present_entry_flush);
999} 1059}
1000 1060
1001static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) 1061static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
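
The mask computed above is the page-selective invalidation order: the page count rounded up to a power of two, expressed as an exponent. An arithmetic sketch (order_of() stands in for ilog2(__roundup_pow_of_two())):

#include <stdio.h>

static unsigned int order_of(unsigned long pages)
{
	unsigned int mask = 0;

	while ((1UL << mask) < pages)
		mask++;
	return mask;
}

int main(void)
{
	unsigned long pages;

	/* PSI flushes 2^mask naturally aligned pages; e.g. 5 pages round
	 * up to mask 3, an 8-page invalidation. A mask above the MAMV
	 * capability field forces the domain-selective fallback above. */
	for (pages = 1; pages <= 5; pages++)
		printf("%lu page(s) -> mask %u (%lu pages flushed)\n",
		       pages, order_of(pages), 1UL << order_of(pages));
	return 0;
}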
@@ -1021,13 +1081,13 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
1021 unsigned long flags; 1081 unsigned long flags;
1022 1082
1023 spin_lock_irqsave(&iommu->register_lock, flags); 1083 spin_lock_irqsave(&iommu->register_lock, flags);
1024 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG); 1084 iommu->gcmd |= DMA_GCMD_TE;
1085 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1025 1086
1026 /* Make sure hardware completes it */ 1087 /* Make sure hardware completes it */
1027 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1088 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1028 readl, (sts & DMA_GSTS_TES), sts); 1089 readl, (sts & DMA_GSTS_TES), sts);
1029 1090
1030 iommu->gcmd |= DMA_GCMD_TE;
1031 spin_unlock_irqrestore(&iommu->register_lock, flags); 1091 spin_unlock_irqrestore(&iommu->register_lock, flags);
1032 return 0; 1092 return 0;
1033} 1093}
@@ -1043,7 +1103,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
1043 1103
1044 /* Make sure hardware completes it */ 1104 /* Make sure hardware completes it */
1045 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1105 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1046 readl, (!(sts & DMA_GSTS_TES)), sts); 1106 readl, (!(sts & DMA_GSTS_TES)), sts);
1047 1107
1048 spin_unlock_irqrestore(&iommu->register_lock, flag); 1108 spin_unlock_irqrestore(&iommu->register_lock, flag);
1049 return 0; 1109 return 0;
@@ -1142,48 +1202,71 @@ void free_dmar_iommu(struct intel_iommu *iommu)
1142 free_context_table(iommu); 1202 free_context_table(iommu);
1143} 1203}
1144 1204
1145static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) 1205static struct dmar_domain *alloc_domain(void)
1146{ 1206{
1147 unsigned long num;
1148 unsigned long ndomains;
1149 struct dmar_domain *domain; 1207 struct dmar_domain *domain;
1150 unsigned long flags;
1151 1208
1152 domain = alloc_domain_mem(); 1209 domain = alloc_domain_mem();
1153 if (!domain) 1210 if (!domain)
1154 return NULL; 1211 return NULL;
1155 1212
1213 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1214 domain->flags = 0;
1215
1216 return domain;
1217}
1218
1219static int iommu_attach_domain(struct dmar_domain *domain,
1220 struct intel_iommu *iommu)
1221{
1222 int num;
1223 unsigned long ndomains;
1224 unsigned long flags;
1225
1156 ndomains = cap_ndoms(iommu->cap); 1226 ndomains = cap_ndoms(iommu->cap);
1157 1227
1158 spin_lock_irqsave(&iommu->lock, flags); 1228 spin_lock_irqsave(&iommu->lock, flags);
1229
1159 num = find_first_zero_bit(iommu->domain_ids, ndomains); 1230 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1160 if (num >= ndomains) { 1231 if (num >= ndomains) {
1161 spin_unlock_irqrestore(&iommu->lock, flags); 1232 spin_unlock_irqrestore(&iommu->lock, flags);
1162 free_domain_mem(domain);
1163 printk(KERN_ERR "IOMMU: no free domain ids\n"); 1233 printk(KERN_ERR "IOMMU: no free domain ids\n");
1164 return NULL; 1234 return -ENOMEM;
1165 } 1235 }
1166 1236
1167 set_bit(num, iommu->domain_ids);
1168 domain->id = num; 1237 domain->id = num;
1169 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); 1238 set_bit(num, iommu->domain_ids);
1170 set_bit(iommu->seq_id, &domain->iommu_bmp); 1239 set_bit(iommu->seq_id, &domain->iommu_bmp);
1171 domain->flags = 0;
1172 iommu->domains[num] = domain; 1240 iommu->domains[num] = domain;
1173 spin_unlock_irqrestore(&iommu->lock, flags); 1241 spin_unlock_irqrestore(&iommu->lock, flags);
1174 1242
1175 return domain; 1243 return 0;
1176} 1244}
1177 1245
1178static void iommu_free_domain(struct dmar_domain *domain) 1246static void iommu_detach_domain(struct dmar_domain *domain,
1247 struct intel_iommu *iommu)
1179{ 1248{
1180 unsigned long flags; 1249 unsigned long flags;
1181 struct intel_iommu *iommu; 1250 int num, ndomains;
1182 1251 int found = 0;
1183 iommu = domain_get_iommu(domain);
1184 1252
1185 spin_lock_irqsave(&iommu->lock, flags); 1253 spin_lock_irqsave(&iommu->lock, flags);
1186 clear_bit(domain->id, iommu->domain_ids); 1254 ndomains = cap_ndoms(iommu->cap);
1255 num = find_first_bit(iommu->domain_ids, ndomains);
1256 for (; num < ndomains; ) {
1257 if (iommu->domains[num] == domain) {
1258 found = 1;
1259 break;
1260 }
1261 num = find_next_bit(iommu->domain_ids,
1262 cap_ndoms(iommu->cap), num+1);
1263 }
1264
1265 if (found) {
1266 clear_bit(num, iommu->domain_ids);
1267 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1268 iommu->domains[num] = NULL;
1269 }
1187 spin_unlock_irqrestore(&iommu->lock, flags); 1270 spin_unlock_irqrestore(&iommu->lock, flags);
1188} 1271}
1189 1272
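
attach and detach above amount to bitmap allocation of hardware domain ids, with the domain itself now decoupled from any single unit. A standalone sketch of that id lifecycle (sizes invented):

#include <stdio.h>

#define NDOMAINS 16	/* stand-in for cap_ndoms(iommu->cap) */

static unsigned char domain_ids[NDOMAINS];	/* 0 = free, 1 = in use */

/* find_first_zero_bit() analogue: claim the lowest free domain id. */
static int attach_domain(void)
{
	int num;

	for (num = 0; num < NDOMAINS; num++)
		if (!domain_ids[num]) {
			domain_ids[num] = 1;
			return num;
		}
	return -1;	/* no free ids: -ENOMEM in the driver */
}

static void detach_domain(int num)
{
	domain_ids[num] = 0;
}

int main(void)
{
	int a = attach_domain();		/* 0 */
	int b = attach_domain();		/* 1 */

	detach_domain(a);
	printf("reused id: %d\n", attach_domain());	/* 0 again */
	(void)b;
	return 0;
}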
@@ -1303,6 +1386,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1303 1386
1304static void domain_exit(struct dmar_domain *domain) 1387static void domain_exit(struct dmar_domain *domain)
1305{ 1388{
1389 struct dmar_drhd_unit *drhd;
1390 struct intel_iommu *iommu;
1306 u64 end; 1391 u64 end;
1307 1392
1308 /* Domain 0 is reserved, so dont process it */ 1393 /* Domain 0 is reserved, so dont process it */
@@ -1321,12 +1406,15 @@ static void domain_exit(struct dmar_domain *domain)
1321 /* free page tables */ 1406 /* free page tables */
1322 dma_pte_free_pagetable(domain, 0, end); 1407 dma_pte_free_pagetable(domain, 0, end);
1323 1408
1324 iommu_free_domain(domain); 1409 for_each_active_iommu(iommu, drhd)
1410 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1411 iommu_detach_domain(domain, iommu);
1412
1325 free_domain_mem(domain); 1413 free_domain_mem(domain);
1326} 1414}
1327 1415
1328static int domain_context_mapping_one(struct dmar_domain *domain, 1416static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1329 int segment, u8 bus, u8 devfn) 1417 u8 bus, u8 devfn, int translation)
1330{ 1418{
1331 struct context_entry *context; 1419 struct context_entry *context;
1332 unsigned long flags; 1420 unsigned long flags;
@@ -1336,10 +1424,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1336 unsigned long ndomains; 1424 unsigned long ndomains;
1337 int id; 1425 int id;
1338 int agaw; 1426 int agaw;
1427 struct device_domain_info *info = NULL;
1339 1428
1340 pr_debug("Set context mapping for %02x:%02x.%d\n", 1429 pr_debug("Set context mapping for %02x:%02x.%d\n",
1341 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1430 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1431
1342 BUG_ON(!domain->pgd); 1432 BUG_ON(!domain->pgd);
1433 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1434 translation != CONTEXT_TT_MULTI_LEVEL);
1343 1435
1344 iommu = device_to_iommu(segment, bus, devfn); 1436 iommu = device_to_iommu(segment, bus, devfn);
1345 if (!iommu) 1437 if (!iommu)
@@ -1357,7 +1449,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1357 id = domain->id; 1449 id = domain->id;
1358 pgd = domain->pgd; 1450 pgd = domain->pgd;
1359 1451
1360 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { 1452 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1453 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1361 int found = 0; 1454 int found = 0;
1362 1455
1363 /* find an available domain id for this device in iommu */ 1456 /* find an available domain id for this device in iommu */
@@ -1382,6 +1475,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1382 } 1475 }
1383 1476
1384 set_bit(num, iommu->domain_ids); 1477 set_bit(num, iommu->domain_ids);
1478 set_bit(iommu->seq_id, &domain->iommu_bmp);
1385 iommu->domains[num] = domain; 1479 iommu->domains[num] = domain;
1386 id = num; 1480 id = num;
1387 } 1481 }
@@ -1399,21 +1493,44 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1399 } 1493 }
1400 1494
1401 context_set_domain_id(context, id); 1495 context_set_domain_id(context, id);
1402 context_set_address_width(context, iommu->agaw); 1496
1403 context_set_address_root(context, virt_to_phys(pgd)); 1497 if (translation != CONTEXT_TT_PASS_THROUGH) {
1404 context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL); 1498 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1499 translation = info ? CONTEXT_TT_DEV_IOTLB :
1500 CONTEXT_TT_MULTI_LEVEL;
1501 }
1502 /*
1503 * In pass through mode, AW must be programmed to indicate the largest
1504 * AGAW value supported by hardware. And ASR is ignored by hardware.
1505 */
1506 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1507 context_set_address_width(context, iommu->msagaw);
1508 else {
1509 context_set_address_root(context, virt_to_phys(pgd));
1510 context_set_address_width(context, iommu->agaw);
1511 }
1512
1513 context_set_translation_type(context, translation);
1405 context_set_fault_enable(context); 1514 context_set_fault_enable(context);
1406 context_set_present(context); 1515 context_set_present(context);
1407 domain_flush_cache(domain, context, sizeof(*context)); 1516 domain_flush_cache(domain, context, sizeof(*context));
1408 1517
1409 /* it's a non-present to present mapping */ 1518 /*
1410 if (iommu->flush.flush_context(iommu, domain->id, 1519 * It's a non-present to present mapping. If hardware doesn't cache
1411 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1520 * non-present entries we only need to flush the write-buffer. If it
1412 DMA_CCMD_DEVICE_INVL, 1)) 1521 * _does_ cache non-present entries, then it does so in the special
1522 * domain #0, which we have to flush:
1523 */
1524 if (cap_caching_mode(iommu->cap)) {
1525 iommu->flush.flush_context(iommu, 0,
1526 (((u16)bus) << 8) | devfn,
1527 DMA_CCMD_MASK_NOBIT,
1528 DMA_CCMD_DEVICE_INVL);
1529 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1530 } else {
1413 iommu_flush_write_buffer(iommu); 1531 iommu_flush_write_buffer(iommu);
1414 else 1532 }
1415 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); 1533 iommu_enable_dev_iotlb(info);
1416
1417 spin_unlock_irqrestore(&iommu->lock, flags); 1534 spin_unlock_irqrestore(&iommu->lock, flags);
1418 1535
1419 spin_lock_irqsave(&domain->iommu_lock, flags); 1536 spin_lock_irqsave(&domain->iommu_lock, flags);
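
The translation-type selection above follows a simple precedence: an explicit pass-through request wins, otherwise an ATS-capable device gets the device-IOTLB context type, and everything else stays multi-level. Sketched as a decision function (the TT_* names are invented stand-ins for the CONTEXT_TT_* constants):

#include <stdio.h>

enum { TT_MULTI_LEVEL, TT_DEV_IOTLB, TT_PASS_THROUGH };

static int pick_translation(int want_pass_through, int dev_iotlb_capable)
{
	if (want_pass_through)
		return TT_PASS_THROUGH;
	return dev_iotlb_capable ? TT_DEV_IOTLB : TT_MULTI_LEVEL;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_translation(1, 1),	/* pass-through wins */
	       pick_translation(0, 1),	/* device IOTLB */
	       pick_translation(0, 0));	/* multi-level */
	return 0;
}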
@@ -1426,13 +1543,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1426} 1543}
1427 1544
1428static int 1545static int
1429domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev) 1546domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1547 int translation)
1430{ 1548{
1431 int ret; 1549 int ret;
1432 struct pci_dev *tmp, *parent; 1550 struct pci_dev *tmp, *parent;
1433 1551
1434 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), 1552 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1435 pdev->bus->number, pdev->devfn); 1553 pdev->bus->number, pdev->devfn,
1554 translation);
1436 if (ret) 1555 if (ret)
1437 return ret; 1556 return ret;
1438 1557
@@ -1446,7 +1565,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1446 ret = domain_context_mapping_one(domain, 1565 ret = domain_context_mapping_one(domain,
1447 pci_domain_nr(parent->bus), 1566 pci_domain_nr(parent->bus),
1448 parent->bus->number, 1567 parent->bus->number,
1449 parent->devfn); 1568 parent->devfn, translation);
1450 if (ret) 1569 if (ret)
1451 return ret; 1570 return ret;
1452 parent = parent->bus->self; 1571 parent = parent->bus->self;
@@ -1454,12 +1573,14 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1454 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ 1573 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1455 return domain_context_mapping_one(domain, 1574 return domain_context_mapping_one(domain,
1456 pci_domain_nr(tmp->subordinate), 1575 pci_domain_nr(tmp->subordinate),
1457 tmp->subordinate->number, 0); 1576 tmp->subordinate->number, 0,
1577 translation);
1458 else /* this is a legacy PCI bridge */ 1578 else /* this is a legacy PCI bridge */
1459 return domain_context_mapping_one(domain, 1579 return domain_context_mapping_one(domain,
1460 pci_domain_nr(tmp->bus), 1580 pci_domain_nr(tmp->bus),
1461 tmp->bus->number, 1581 tmp->bus->number,
1462 tmp->devfn); 1582 tmp->devfn,
1583 translation);
1463} 1584}
1464 1585
1465static int domain_context_mapped(struct pci_dev *pdev) 1586static int domain_context_mapped(struct pci_dev *pdev)
@@ -1540,9 +1661,8 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1540 1661
1541 clear_context_table(iommu, bus, devfn); 1662 clear_context_table(iommu, bus, devfn);
1542 iommu->flush.flush_context(iommu, 0, 0, 0, 1663 iommu->flush.flush_context(iommu, 0, 0, 0,
1543 DMA_CCMD_GLOBAL_INVL, 0); 1664 DMA_CCMD_GLOBAL_INVL);
1544 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 1665 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1545 DMA_TLB_GLOBAL_FLUSH, 0);
1546} 1666}
1547 1667
1548static void domain_remove_dev_info(struct dmar_domain *domain) 1668static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1561,6 +1681,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
1561 info->dev->dev.archdata.iommu = NULL; 1681 info->dev->dev.archdata.iommu = NULL;
1562 spin_unlock_irqrestore(&device_domain_lock, flags); 1682 spin_unlock_irqrestore(&device_domain_lock, flags);
1563 1683
1684 iommu_disable_dev_iotlb(info);
1564 iommu = device_to_iommu(info->segment, info->bus, info->devfn); 1685 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1565 iommu_detach_dev(iommu, info->bus, info->devfn); 1686 iommu_detach_dev(iommu, info->bus, info->devfn);
1566 free_devinfo_mem(info); 1687 free_devinfo_mem(info);
@@ -1597,6 +1718,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1597 unsigned long flags; 1718 unsigned long flags;
1598 int bus = 0, devfn = 0; 1719 int bus = 0, devfn = 0;
1599 int segment; 1720 int segment;
1721 int ret;
1600 1722
1601 domain = find_domain(pdev); 1723 domain = find_domain(pdev);
1602 if (domain) 1724 if (domain)
@@ -1629,6 +1751,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1629 } 1751 }
1630 } 1752 }
1631 1753
1754 domain = alloc_domain();
1755 if (!domain)
1756 goto error;
1757
1632 /* Allocate new domain for the device */ 1758 /* Allocate new domain for the device */
1633 drhd = dmar_find_matched_drhd_unit(pdev); 1759 drhd = dmar_find_matched_drhd_unit(pdev);
1634 if (!drhd) { 1760 if (!drhd) {
@@ -1638,9 +1764,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1638 } 1764 }
1639 iommu = drhd->iommu; 1765 iommu = drhd->iommu;
1640 1766
1641 domain = iommu_alloc_domain(iommu); 1767 ret = iommu_attach_domain(domain, iommu);
1642 if (!domain) 1768 if (ret) {
1769 domain_exit(domain);
1643 goto error; 1770 goto error;
1771 }
1644 1772
1645 if (domain_init(domain, gaw)) { 1773 if (domain_init(domain, gaw)) {
1646 domain_exit(domain); 1774 domain_exit(domain);
@@ -1714,6 +1842,8 @@ error:
1714 return find_domain(pdev); 1842 return find_domain(pdev);
1715} 1843}
1716 1844
1845static int iommu_identity_mapping;
1846
1717static int iommu_prepare_identity_map(struct pci_dev *pdev, 1847static int iommu_prepare_identity_map(struct pci_dev *pdev,
1718 unsigned long long start, 1848 unsigned long long start,
1719 unsigned long long end) 1849 unsigned long long end)
@@ -1726,8 +1856,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1726 printk(KERN_INFO 1856 printk(KERN_INFO
1727 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1857 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1728 pci_name(pdev), start, end); 1858 pci_name(pdev), start, end);
1729 /* page table init */ 1859 if (iommu_identity_mapping)
1730 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 1860 domain = si_domain;
1861 else
1862 /* page table init */
1863 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1731 if (!domain) 1864 if (!domain)
1732 return -ENOMEM; 1865 return -ENOMEM;
1733 1866
@@ -1756,7 +1889,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1756 goto error; 1889 goto error;
1757 1890
1758 /* context entry init */ 1891 /* context entry init */
1759 ret = domain_context_mapping(domain, pdev); 1892 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1760 if (!ret) 1893 if (!ret)
1761 return 0; 1894 return 0;
1762error: 1895error:
@@ -1774,7 +1907,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1774 rmrr->end_address + 1); 1907 rmrr->end_address + 1);
1775} 1908}
1776 1909
1777#ifdef CONFIG_DMAR_GFX_WA
1778struct iommu_prepare_data { 1910struct iommu_prepare_data {
1779 struct pci_dev *pdev; 1911 struct pci_dev *pdev;
1780 int ret; 1912 int ret;
@@ -1809,6 +1941,7 @@ static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1809 return data.ret; 1941 return data.ret;
1810} 1942}
1811 1943
1944#ifdef CONFIG_DMAR_GFX_WA
1812static void __init iommu_prepare_gfx_mapping(void) 1945static void __init iommu_prepare_gfx_mapping(void)
1813{ 1946{
1814 struct pci_dev *pdev = NULL; 1947 struct pci_dev *pdev = NULL;
@@ -1857,13 +1990,133 @@ static inline void iommu_prepare_isa(void)
1857} 1990}
1858#endif /* !CONFIG_DMAR_FLPY_WA */ 1991#endif /* !CONFIG_DMAR_FLPY_WA */
1859 1992
1860static int __init init_dmars(void) 1993/* Initialize each context entry as pass through. */
1994static int __init init_context_pass_through(void)
1995{
1996 struct pci_dev *pdev = NULL;
1997 struct dmar_domain *domain;
1998 int ret;
1999
2000 for_each_pci_dev(pdev) {
2001 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2002 ret = domain_context_mapping(domain, pdev,
2003 CONTEXT_TT_PASS_THROUGH);
2004 if (ret)
2005 return ret;
2006 }
2007 return 0;
2008}
2009
2010static int md_domain_init(struct dmar_domain *domain, int guest_width);
2011static int si_domain_init(void)
2012{
2013 struct dmar_drhd_unit *drhd;
2014 struct intel_iommu *iommu;
2015 int ret = 0;
2016
2017 si_domain = alloc_domain();
2018 if (!si_domain)
2019 return -EFAULT;
2020
2021
2022 for_each_active_iommu(iommu, drhd) {
2023 ret = iommu_attach_domain(si_domain, iommu);
2024 if (ret) {
2025 domain_exit(si_domain);
2026 return -EFAULT;
2027 }
2028 }
2029
2030 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2031 domain_exit(si_domain);
2032 return -EFAULT;
2033 }
2034
2035 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2036
2037 return 0;
2038}
2039
2040static void domain_remove_one_dev_info(struct dmar_domain *domain,
2041 struct pci_dev *pdev);
2042static int identity_mapping(struct pci_dev *pdev)
2043{
2044 struct device_domain_info *info;
2045
2046 if (likely(!iommu_identity_mapping))
2047 return 0;
2048
2049
2050 list_for_each_entry(info, &si_domain->devices, link)
2051 if (info->dev == pdev)
2052 return 1;
2053 return 0;
2054}
2055
2056static int domain_add_dev_info(struct dmar_domain *domain,
2057 struct pci_dev *pdev)
2058{
2059 struct device_domain_info *info;
2060 unsigned long flags;
2061
2062 info = alloc_devinfo_mem();
2063 if (!info)
2064 return -ENOMEM;
2065
2066 info->segment = pci_domain_nr(pdev->bus);
2067 info->bus = pdev->bus->number;
2068 info->devfn = pdev->devfn;
2069 info->dev = pdev;
2070 info->domain = domain;
2071
2072 spin_lock_irqsave(&device_domain_lock, flags);
2073 list_add(&info->link, &domain->devices);
2074 list_add(&info->global, &device_domain_list);
2075 pdev->dev.archdata.iommu = info;
2076 spin_unlock_irqrestore(&device_domain_lock, flags);
2077
2078 return 0;
2079}
2080
2081static int iommu_prepare_static_identity_mapping(void)
2082{
2083 struct pci_dev *pdev = NULL;
2084 int ret;
2085
2086 ret = si_domain_init();
2087 if (ret)
2088 return -EFAULT;
2089
2090 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2091 for_each_pci_dev(pdev) {
2092 ret = iommu_prepare_with_active_regions(pdev);
2093 if (ret) {
2094 printk(KERN_INFO "1:1 mapping to one domain failed.\n");
2095 return -EFAULT;
2096 }
2097 ret = domain_add_dev_info(si_domain, pdev);
2098 if (ret)
2099 return ret;
2100 }
2101
2102 return 0;
2103}
2104
2105int __init init_dmars(void)
1861{ 2106{
1862 struct dmar_drhd_unit *drhd; 2107 struct dmar_drhd_unit *drhd;
1863 struct dmar_rmrr_unit *rmrr; 2108 struct dmar_rmrr_unit *rmrr;
1864 struct pci_dev *pdev; 2109 struct pci_dev *pdev;
1865 struct intel_iommu *iommu; 2110 struct intel_iommu *iommu;
1866 int i, ret; 2111 int i, ret;
2112 int pass_through = 1;
2113
2114 /*
2115 * In case pass through cannot be enabled, the iommu tries to use identity
2116 * mapping.
2117 */
2118 if (iommu_pass_through)
2119 iommu_identity_mapping = 1;
1867 2120
1868 /* 2121 /*
1869 * for each drhd 2122 * for each drhd
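
The two flags interact as a small cascade: requesting pass-through arms the identity-mapping fallback up front, and the request itself survives only if every unit advertises the capability. A sketch of that resolution (names invented):

#include <stdio.h>

static void resolve_mode(int want_pt, int all_hw_support_pt,
			 int *pass_through, int *identity_fallback)
{
	*identity_fallback = want_pt;	/* prepared before probing hardware */
	*pass_through = want_pt && all_hw_support_pt;
}

int main(void)
{
	int pt, identity;

	resolve_mode(1, 0, &pt, &identity);
	printf("pass-through=%d identity-fallback=%d\n", pt, identity);
	return 0;
}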
@@ -1917,7 +2170,15 @@ static int __init init_dmars(void)
1917 printk(KERN_ERR "IOMMU: allocate root entry failed\n"); 2170 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1918 goto error; 2171 goto error;
1919 } 2172 }
2173 if (!ecap_pass_through(iommu->ecap))
2174 pass_through = 0;
1920 } 2175 }
2176 if (iommu_pass_through)
2177 if (!pass_through) {
2178 printk(KERN_INFO
2179 "Pass Through is not supported by hardware.\n");
2180 iommu_pass_through = 0;
2181 }
1921 2182
1922 /* 2183 /*
1923 * Start from the sane iommu hardware state. 2184 * Start from the sane iommu hardware state.
@@ -1973,35 +2234,60 @@ static int __init init_dmars(void)
1973 } 2234 }
1974 2235
1975 /* 2236 /*
1976 * For each rmrr 2237 * If pass through is set and enabled, context entries of all pci
1977 * for each dev attached to rmrr 2238 * devices are initialized with the pass through translation type.
1978 * do
1979 * locate drhd for dev, alloc domain for dev
1980 * allocate free domain
1981 * allocate page table entries for rmrr
1982 * if context not allocated for bus
1983 * allocate and init context
1984 * set present in root table for this bus
1985 * init context with domain, translation etc
1986 * endfor
1987 * endfor
1988 */ 2239 */
1989 for_each_rmrr_units(rmrr) { 2240 if (iommu_pass_through) {
1990 for (i = 0; i < rmrr->devices_cnt; i++) { 2241 ret = init_context_pass_through();
1991 pdev = rmrr->devices[i]; 2242 if (ret) {
1992 /* some BIOS lists non-exist devices in DMAR table */ 2243 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
1993 if (!pdev) 2244 iommu_pass_through = 0;
1994 continue;
1995 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
1996 if (ret)
1997 printk(KERN_ERR
1998 "IOMMU: mapping reserved region failed\n");
1999 } 2245 }
2000 } 2246 }
2001 2247
2002 iommu_prepare_gfx_mapping(); 2248 /*
2249 * If pass through is not set or not enabled, set up context entries for
2250 * identity mappings for rmrr, gfx, and isa, and fall back to static
2251 * identity mapping if iommu_identity_mapping is set.
2252 */
2253 if (!iommu_pass_through) {
2254 if (iommu_identity_mapping)
2255 iommu_prepare_static_identity_mapping();
2256 /*
2257 * For each rmrr
2258 * for each dev attached to rmrr
2259 * do
2260 * locate drhd for dev, alloc domain for dev
2261 * allocate free domain
2262 * allocate page table entries for rmrr
2263 * if context not allocated for bus
2264 * allocate and init context
2265 * set present in root table for this bus
2266 * init context with domain, translation etc
2267 * endfor
2268 * endfor
2269 */
2270 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2271 for_each_rmrr_units(rmrr) {
2272 for (i = 0; i < rmrr->devices_cnt; i++) {
2273 pdev = rmrr->devices[i];
2274 /*
2275 * some BIOS lists non-exist devices in DMAR
2276 * table.
2277 */
2278 if (!pdev)
2279 continue;
2280 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2281 if (ret)
2282 printk(KERN_ERR
2283 "IOMMU: mapping reserved region failed\n");
2284 }
2285 }
2286
2287 iommu_prepare_gfx_mapping();
2003 2288
2004 iommu_prepare_isa(); 2289 iommu_prepare_isa();
2290 }
2005 2291
2006 /* 2292 /*
2007 * for each drhd 2293 * for each drhd
@@ -2023,10 +2309,8 @@ static int __init init_dmars(void)
2023 2309
2024 iommu_set_root_entry(iommu); 2310 iommu_set_root_entry(iommu);
2025 2311
2026 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, 2312 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2027 0); 2313 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2028 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2029 0);
2030 iommu_disable_protect_mem_regions(iommu); 2314 iommu_disable_protect_mem_regions(iommu);
2031 2315
2032 ret = iommu_enable_translation(iommu); 2316 ret = iommu_enable_translation(iommu);
@@ -2112,7 +2396,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
2112 2396
2113 /* make sure context mapping is ok */ 2397 /* make sure context mapping is ok */
2114 if (unlikely(!domain_context_mapped(pdev))) { 2398 if (unlikely(!domain_context_mapped(pdev))) {
2115 ret = domain_context_mapping(domain, pdev); 2399 ret = domain_context_mapping(domain, pdev,
2400 CONTEXT_TT_MULTI_LEVEL);
2116 if (ret) { 2401 if (ret) {
2117 printk(KERN_ERR 2402 printk(KERN_ERR
2118 "Domain context map for %s failed", 2403 "Domain context map for %s failed",
@@ -2124,6 +2409,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
2124 return domain; 2409 return domain;
2125} 2410}
2126 2411
2412static int iommu_dummy(struct pci_dev *pdev)
2413{
2414 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2415}
2416
2417/* Check if the pdev needs to go through the non-identity map and unmap process. */
2418static int iommu_no_mapping(struct pci_dev *pdev)
2419{
2420 int found;
2421
2422 if (!iommu_identity_mapping)
2423 return iommu_dummy(pdev);
2424
2425 found = identity_mapping(pdev);
2426 if (found) {
2427 if (pdev->dma_mask > DMA_BIT_MASK(32))
2428 return 1;
2429 else {
2430 /*
2431 * A 32 bit DMA device is removed from si_domain and falls back
2432 * to non-identity mapping.
2433 */
2434 domain_remove_one_dev_info(si_domain, pdev);
2435 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2436 pci_name(pdev));
2437 return 0;
2438 }
2439 } else {
2440 /*
2441 * If a 64 bit DMA device is detached from a vm, the device
2442 * is put into si_domain for identity mapping.
2443 */
2444 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2445 int ret;
2446 ret = domain_add_dev_info(si_domain, pdev);
2447 if (!ret) {
2448 printk(KERN_INFO "64bit %s uses identity mapping\n",
2449 pci_name(pdev));
2450 return 1;
2451 }
2452 }
2453 }
2454
2455 return iommu_dummy(pdev);
2456}
2457
2127static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, 2458static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2128 size_t size, int dir, u64 dma_mask) 2459 size_t size, int dir, u64 dma_mask)
2129{ 2460{
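
The dma_mask test inside iommu_no_mapping() is the whole policy: only a device that can address memory above 4GiB may stay identity mapped, since si_domain spans all of RAM. A sketch of the predicate (DMA_BIT_MASK mirrors the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int stays_identity_mapped(uint64_t dma_mask)
{
	return dma_mask > DMA_BIT_MASK(32);
}

int main(void)
{
	printf("64-bit device: %d\n", stays_identity_mapped(DMA_BIT_MASK(64)));
	printf("32-bit device: %d\n", stays_identity_mapped(DMA_BIT_MASK(32)));
	return 0;
}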
@@ -2136,7 +2467,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2136 struct intel_iommu *iommu; 2467 struct intel_iommu *iommu;
2137 2468
2138 BUG_ON(dir == DMA_NONE); 2469 BUG_ON(dir == DMA_NONE);
2139 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2470
2471 if (iommu_no_mapping(pdev))
2140 return paddr; 2472 return paddr;
2141 2473
2142 domain = get_valid_domain_for_dev(pdev); 2474 domain = get_valid_domain_for_dev(pdev);
@@ -2173,10 +2505,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2173 if (ret) 2505 if (ret)
2174 goto error; 2506 goto error;
2175 2507
2176 /* it's a non-present to present mapping */ 2508 /* it's a non-present to present mapping. Only flush if caching mode */
2177 ret = iommu_flush_iotlb_psi(iommu, domain->id, 2509 if (cap_caching_mode(iommu->cap))
2178 start_paddr, size >> VTD_PAGE_SHIFT, 1); 2510 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2179 if (ret) 2511 size >> VTD_PAGE_SHIFT);
2512 else
2180 iommu_flush_write_buffer(iommu); 2513 iommu_flush_write_buffer(iommu);
2181 2514
2182 return start_paddr + ((u64)paddr & (~PAGE_MASK)); 2515 return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2210,15 +2543,22 @@ static void flush_unmaps(void)
2210 if (!iommu) 2543 if (!iommu)
2211 continue; 2544 continue;
2212 2545
2213 if (deferred_flush[i].next) { 2546 if (!deferred_flush[i].next)
2214 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 2547 continue;
2215 DMA_TLB_GLOBAL_FLUSH, 0); 2548
2216 for (j = 0; j < deferred_flush[i].next; j++) { 2549 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2217 __free_iova(&deferred_flush[i].domain[j]->iovad, 2550 DMA_TLB_GLOBAL_FLUSH);
2218 deferred_flush[i].iova[j]); 2551 for (j = 0; j < deferred_flush[i].next; j++) {
2219 } 2552 unsigned long mask;
2220 deferred_flush[i].next = 0; 2553 struct iova *iova = deferred_flush[i].iova[j];
2554
2555 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2556 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2557 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2558 iova->pfn_lo << PAGE_SHIFT, mask);
2559 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2221 } 2560 }
2561 deferred_flush[i].next = 0;
2222 } 2562 }
2223 2563
2224 list_size = 0; 2564 list_size = 0;
@@ -2269,8 +2609,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2269 struct iova *iova; 2609 struct iova *iova;
2270 struct intel_iommu *iommu; 2610 struct intel_iommu *iommu;
2271 2611
2272 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2612 if (iommu_no_mapping(pdev))
2273 return; 2613 return;
2614
2274 domain = find_domain(pdev); 2615 domain = find_domain(pdev);
2275 BUG_ON(!domain); 2616 BUG_ON(!domain);
2276 2617
@@ -2291,9 +2632,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2291 /* free page tables */ 2632 /* free page tables */
2292 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2633 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2293 if (intel_iommu_strict) { 2634 if (intel_iommu_strict) {
2294 if (iommu_flush_iotlb_psi(iommu, 2635 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2295 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) 2636 size >> VTD_PAGE_SHIFT);
2296 iommu_flush_write_buffer(iommu);
2297 /* free iova */ 2637 /* free iova */
2298 __free_iova(&domain->iovad, iova); 2638 __free_iova(&domain->iovad, iova);
2299 } else { 2639 } else {
@@ -2361,7 +2701,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2361 struct scatterlist *sg; 2701 struct scatterlist *sg;
2362 struct intel_iommu *iommu; 2702 struct intel_iommu *iommu;
2363 2703
2364 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2704 if (iommu_no_mapping(pdev))
2365 return; 2705 return;
2366 2706
2367 domain = find_domain(pdev); 2707 domain = find_domain(pdev);
@@ -2384,9 +2724,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2384 /* free page tables */ 2724 /* free page tables */
2385 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2725 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2386 2726
2387 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, 2727 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2388 size >> VTD_PAGE_SHIFT, 0)) 2728 size >> VTD_PAGE_SHIFT);
2389 iommu_flush_write_buffer(iommu);
2390 2729
2391 /* free iova */ 2730 /* free iova */
2392 __free_iova(&domain->iovad, iova); 2731 __free_iova(&domain->iovad, iova);
@@ -2423,7 +2762,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2423 struct intel_iommu *iommu; 2762 struct intel_iommu *iommu;
2424 2763
2425 BUG_ON(dir == DMA_NONE); 2764 BUG_ON(dir == DMA_NONE);
2426 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2765 if (iommu_no_mapping(pdev))
2427 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); 2766 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2428 2767
2429 domain = get_valid_domain_for_dev(pdev); 2768 domain = get_valid_domain_for_dev(pdev);
@@ -2478,10 +2817,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2478 offset += size; 2817 offset += size;
2479 } 2818 }
2480 2819
2481 /* it's a non-present to present mapping */ 2820 /* it's a non-present to present mapping. Only flush if caching mode */
2482 if (iommu_flush_iotlb_psi(iommu, domain->id, 2821 if (cap_caching_mode(iommu->cap))
2483 start_addr, offset >> VTD_PAGE_SHIFT, 1)) 2822 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2823 offset >> VTD_PAGE_SHIFT);
2824 else
2484 iommu_flush_write_buffer(iommu); 2825 iommu_flush_write_buffer(iommu);
2826
2485 return nelems; 2827 return nelems;
2486} 2828}
2487 2829
@@ -2640,9 +2982,9 @@ static int init_iommu_hw(void)
2640 iommu_set_root_entry(iommu); 2982 iommu_set_root_entry(iommu);
2641 2983
2642 iommu->flush.flush_context(iommu, 0, 0, 0, 2984 iommu->flush.flush_context(iommu, 0, 0, 0,
2643 DMA_CCMD_GLOBAL_INVL, 0); 2985 DMA_CCMD_GLOBAL_INVL);
2644 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 2986 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2645 DMA_TLB_GLOBAL_FLUSH, 0); 2987 DMA_TLB_GLOBAL_FLUSH);
2646 iommu_disable_protect_mem_regions(iommu); 2988 iommu_disable_protect_mem_regions(iommu);
2647 iommu_enable_translation(iommu); 2989 iommu_enable_translation(iommu);
2648 } 2990 }
@@ -2657,9 +2999,9 @@ static void iommu_flush_all(void)
2657 2999
2658 for_each_active_iommu(iommu, drhd) { 3000 for_each_active_iommu(iommu, drhd) {
2659 iommu->flush.flush_context(iommu, 0, 0, 0, 3001 iommu->flush.flush_context(iommu, 0, 0, 0,
2660 DMA_CCMD_GLOBAL_INVL, 0); 3002 DMA_CCMD_GLOBAL_INVL);
2661 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 3003 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2662 DMA_TLB_GLOBAL_FLUSH, 0); 3004 DMA_TLB_GLOBAL_FLUSH);
2663 } 3005 }
2664} 3006}
2665 3007
@@ -2782,7 +3124,7 @@ int __init intel_iommu_init(void)
2782 * Check the need for DMA-remapping initialization now. 3124 * Check the need for DMA-remapping initialization now.
2783 * Above initialization will also be used by Interrupt-remapping. 3125 * Above initialization will also be used by Interrupt-remapping.
2784 */ 3126 */
2785 if (no_iommu || swiotlb || dmar_disabled) 3127 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
2786 return -ENODEV; 3128 return -ENODEV;
2787 3129
2788 iommu_init_mempool(); 3130 iommu_init_mempool();
@@ -2802,35 +3144,18 @@ int __init intel_iommu_init(void)
2802 3144
2803 init_timer(&unmap_timer); 3145 init_timer(&unmap_timer);
2804 force_iommu = 1; 3146 force_iommu = 1;
2805 dma_ops = &intel_dma_ops;
2806 init_iommu_sysfs();
2807
2808 register_iommu(&intel_iommu_ops);
2809
2810 return 0;
2811}
2812 3147
2813static int vm_domain_add_dev_info(struct dmar_domain *domain, 3148 if (!iommu_pass_through) {
2814 struct pci_dev *pdev) 3149 printk(KERN_INFO
2815{ 3150 "Multi-level page-table translation for DMAR.\n");
2816 struct device_domain_info *info; 3151 dma_ops = &intel_dma_ops;
2817 unsigned long flags; 3152 } else
2818 3153 printk(KERN_INFO
2819 info = alloc_devinfo_mem(); 3154 "DMAR: Pass through translation for DMAR.\n");
2820 if (!info)
2821 return -ENOMEM;
2822 3155
2823 info->segment = pci_domain_nr(pdev->bus); 3156 init_iommu_sysfs();
2824 info->bus = pdev->bus->number;
2825 info->devfn = pdev->devfn;
2826 info->dev = pdev;
2827 info->domain = domain;
2828 3157
2829 spin_lock_irqsave(&device_domain_lock, flags); 3158 register_iommu(&intel_iommu_ops);
2830 list_add(&info->link, &domain->devices);
2831 list_add(&info->global, &device_domain_list);
2832 pdev->dev.archdata.iommu = info;
2833 spin_unlock_irqrestore(&device_domain_lock, flags);
2834 3159
2835 return 0; 3160 return 0;
2836} 3161}
@@ -2862,7 +3187,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2862 } 3187 }
2863} 3188}
2864 3189
2865static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, 3190static void domain_remove_one_dev_info(struct dmar_domain *domain,
2866 struct pci_dev *pdev) 3191 struct pci_dev *pdev)
2867{ 3192{
2868 struct device_domain_info *info; 3193 struct device_domain_info *info;
@@ -2888,6 +3213,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2888 info->dev->dev.archdata.iommu = NULL; 3213 info->dev->dev.archdata.iommu = NULL;
2889 spin_unlock_irqrestore(&device_domain_lock, flags); 3214 spin_unlock_irqrestore(&device_domain_lock, flags);
2890 3215
3216 iommu_disable_dev_iotlb(info);
2891 iommu_detach_dev(iommu, info->bus, info->devfn); 3217 iommu_detach_dev(iommu, info->bus, info->devfn);
2892 iommu_detach_dependent_devices(iommu, pdev); 3218 iommu_detach_dependent_devices(iommu, pdev);
2893 free_devinfo_mem(info); 3219 free_devinfo_mem(info);
@@ -2938,6 +3264,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2938 3264
2939 spin_unlock_irqrestore(&device_domain_lock, flags1); 3265 spin_unlock_irqrestore(&device_domain_lock, flags1);
2940 3266
3267 iommu_disable_dev_iotlb(info);
2941 iommu = device_to_iommu(info->segment, info->bus, info->devfn); 3268 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
2942 iommu_detach_dev(iommu, info->bus, info->devfn); 3269 iommu_detach_dev(iommu, info->bus, info->devfn);
2943 iommu_detach_dependent_devices(iommu, info->dev); 3270 iommu_detach_dependent_devices(iommu, info->dev);
@@ -2993,7 +3320,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
2993 return domain; 3320 return domain;
2994} 3321}
2995 3322
2996static int vm_domain_init(struct dmar_domain *domain, int guest_width) 3323static int md_domain_init(struct dmar_domain *domain, int guest_width)
2997{ 3324{
2998 int adjust_width; 3325 int adjust_width;
2999 3326
@@ -3084,7 +3411,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
3084 "intel_iommu_domain_init: dmar_domain == NULL\n"); 3411 "intel_iommu_domain_init: dmar_domain == NULL\n");
3085 return -ENOMEM; 3412 return -ENOMEM;
3086 } 3413 }
3087 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 3414 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3088 printk(KERN_ERR 3415 printk(KERN_ERR
3089 "intel_iommu_domain_init() failed\n"); 3416 "intel_iommu_domain_init() failed\n");
3090 vm_domain_exit(dmar_domain); 3417 vm_domain_exit(dmar_domain);
@@ -3119,8 +3446,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3119 3446
3120 old_domain = find_domain(pdev); 3447 old_domain = find_domain(pdev);
3121 if (old_domain) { 3448 if (old_domain) {
3122 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) 3449 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3123 vm_domain_remove_one_dev_info(old_domain, pdev); 3450 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3451 domain_remove_one_dev_info(old_domain, pdev);
3124 else 3452 else
3125 domain_remove_dev_info(old_domain); 3453 domain_remove_dev_info(old_domain);
3126 } 3454 }
@@ -3142,11 +3470,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3142 return -EFAULT; 3470 return -EFAULT;
3143 } 3471 }
3144 3472
3145 ret = domain_context_mapping(dmar_domain, pdev); 3473 ret = domain_add_dev_info(dmar_domain, pdev);
3146 if (ret) 3474 if (ret)
3147 return ret; 3475 return ret;
3148 3476
3149 ret = vm_domain_add_dev_info(dmar_domain, pdev); 3477 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3150 return ret; 3478 return ret;
3151} 3479}
3152 3480
@@ -3156,7 +3484,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3156 struct dmar_domain *dmar_domain = domain->priv; 3484 struct dmar_domain *dmar_domain = domain->priv;
3157 struct pci_dev *pdev = to_pci_dev(dev); 3485 struct pci_dev *pdev = to_pci_dev(dev);
3158 3486
3159 vm_domain_remove_one_dev_info(dmar_domain, pdev); 3487 domain_remove_one_dev_info(dmar_domain, pdev);
3160} 3488}
3161 3489
3162static int intel_iommu_map_range(struct iommu_domain *domain, 3490static int intel_iommu_map_range(struct iommu_domain *domain,
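
The attach path above is also reordered: domain_add_dev_info() now runs before domain_context_mapping(), and the mapping call takes an explicit translation type. The values below are an assumption from this era's include/linux/intel-iommu.h, shown only to make the CONTEXT_TT_MULTI_LEVEL argument readable:

	/* Context-entry translation types (assumed, for reference): */
	#define CONTEXT_TT_MULTI_LEVEL	0	/* translate through the page tables */
	#define CONTEXT_TT_DEV_IOTLB	1	/* page tables plus device IOTLB (ATS) */
	#define CONTEXT_TT_PASS_THROUGH	2	/* leave DMA addresses untranslated */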
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3a0cb0bb0593..4f5b8712931f 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -10,6 +10,8 @@
10#include <linux/intel-iommu.h> 10#include <linux/intel-iommu.h>
11#include "intr_remapping.h" 11#include "intr_remapping.h"
12#include <acpi/acpi.h> 12#include <acpi/acpi.h>
13#include <asm/pci-direct.h>
14#include "pci.h"
13 15
14static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 16static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
15static int ir_ioapic_num; 17static int ir_ioapic_num;
@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified)
314 index = irq_iommu->irte_index + irq_iommu->sub_handle; 316 index = irq_iommu->irte_index + irq_iommu->sub_handle;
315 irte = &iommu->ir_table->base[index]; 317 irte = &iommu->ir_table->base[index];
316 318
317 set_64bit((unsigned long *)irte, irte_modified->low); 319 set_64bit((unsigned long *)&irte->low, irte_modified->low);
320 set_64bit((unsigned long *)&irte->high, irte_modified->high);
318 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 321 __iommu_flush_cache(iommu, irte, sizeof(*irte));
319 322
320 rc = qi_flush_iec(iommu, index, 0); 323 rc = qi_flush_iec(iommu, index, 0);
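
The modify_irte() fix above matters because an IRTE is 128 bits wide: the old code updated only the low half, silently dropping the high half that carries the source-id validation fields introduced later in this patch. A condensed sketch of the assumed layout (the real struct irte is a bitfield union in the DMAR headers):

	/* Assumed IRTE layout, for reference only:
	 *   low  64 bits: present, destination mode, vector, destination ...
	 *   high 64 bits: sid, sq, svt (source-id validation)
	 * Both halves are written with atomic 64-bit stores. */
	union example_irte {
		struct {
			__u64 low;
			__u64 high;
		};
		__u64 raw[2];
	};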
@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
369 return drhd->iommu; 372 return drhd->iommu;
370} 373}
371 374
375static int clear_entries(struct irq_2_iommu *irq_iommu)
376{
377 struct irte *start, *entry, *end;
378 struct intel_iommu *iommu;
379 int index;
380
381 if (irq_iommu->sub_handle)
382 return 0;
383
384 iommu = irq_iommu->iommu;
385 index = irq_iommu->irte_index + irq_iommu->sub_handle;
386
387 start = iommu->ir_table->base + index;
388 end = start + (1 << irq_iommu->irte_mask);
389
390 for (entry = start; entry < end; entry++) {
391 set_64bit((unsigned long *)&entry->low, 0);
392 set_64bit((unsigned long *)&entry->high, 0);
393 }
394
395 return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
396}
397
372int free_irte(int irq) 398int free_irte(int irq)
373{ 399{
374 int rc = 0; 400 int rc = 0;
375 int index, i;
376 struct irte *irte;
377 struct intel_iommu *iommu;
378 struct irq_2_iommu *irq_iommu; 401 struct irq_2_iommu *irq_iommu;
379 unsigned long flags; 402 unsigned long flags;
380 403
@@ -385,16 +408,7 @@ int free_irte(int irq)
385 return -1; 408 return -1;
386 } 409 }
387 410
388 iommu = irq_iommu->iommu; 411 rc = clear_entries(irq_iommu);
389
390 index = irq_iommu->irte_index + irq_iommu->sub_handle;
391 irte = &iommu->ir_table->base[index];
392
393 if (!irq_iommu->sub_handle) {
394 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
395 set_64bit((unsigned long *)(irte + i), 0);
396 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
397 }
398 412
399 irq_iommu->iommu = NULL; 413 irq_iommu->iommu = NULL;
400 irq_iommu->irte_index = 0; 414 irq_iommu->irte_index = 0;
@@ -406,10 +420,95 @@ int free_irte(int irq)
406 return rc; 420 return rc;
407} 421}
408 422
423/*
424 * source validation type
425 */
426#define SVT_NO_VERIFY 0x0 /* no verification is required */
427#define SVT_VERIFY_SID_SQ	0x1 /* verify using SID and SQ fields */
428#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
429
430/*
431 * source-id qualifier
432 */
433#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
434#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
435 * the third least significant bit
436 */
437#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
438 * the second and third least significant bits
439 */
440#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
441 * the three least significant bits
442 */
443
444/*
445 * set SVT, SQ and SID fields of irte to verify
446 * source ids of interrupt requests
447 */
448static void set_irte_sid(struct irte *irte, unsigned int svt,
449 unsigned int sq, unsigned int sid)
450{
451 irte->svt = svt;
452 irte->sq = sq;
453 irte->sid = sid;
454}
455
456int set_ioapic_sid(struct irte *irte, int apic)
457{
458 int i;
459 u16 sid = 0;
460
461 if (!irte)
462 return -1;
463
464 for (i = 0; i < MAX_IO_APICS; i++) {
465 if (ir_ioapic[i].id == apic) {
466 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
467 break;
468 }
469 }
470
471 if (sid == 0) {
472 pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
473 return -1;
474 }
475
476 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
477
478 return 0;
479}
480
481int set_msi_sid(struct irte *irte, struct pci_dev *dev)
482{
483 struct pci_dev *bridge;
484
485 if (!irte || !dev)
486 return -1;
487
488 /* PCIe device or Root Complex integrated PCI device */
489 if (dev->is_pcie || !dev->bus->parent) {
490 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
491 (dev->bus->number << 8) | dev->devfn);
492 return 0;
493 }
494
495 bridge = pci_find_upstream_pcie_bridge(dev);
496 if (bridge) {
497 if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
498 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
499 (bridge->bus->number << 8) | dev->bus->number);
500 else /* this is a legacy PCI bridge */
501 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
502 (bridge->bus->number << 8) | bridge->devfn);
503 }
504
505 return 0;
506}
507
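Taken together, set_irte_sid(), set_ioapic_sid() and set_msi_sid() program how the remapping hardware validates the requester of each interrupt. A hypothetical model of the check the hardware applies, assuming the VT-d SVT/SQ semantics (illustrative only, not kernel code):

	/* req is the 16-bit requester ID (bus << 8 | devfn) of the interrupt. */
	static bool example_sid_ok(const struct irte *irte, u16 req)
	{
		switch (irte->svt) {
		case SVT_NO_VERIFY:
			return true;
		case SVT_VERIFY_SID_SQ: {
			/* SQ_13_IGNORE_n masks out low function-number bits */
			static const u16 mask[] = { 0xffff, 0xfffb, 0xfff9, 0xfff8 };
			return (req & mask[irte->sq]) == (irte->sid & mask[irte->sq]);
		}
		case SVT_VERIFY_BUS:
			/* sid holds a [start_bus, end_bus] range in its two bytes */
			return (req >> 8) >= (irte->sid >> 8) &&
			       (req >> 8) <= (irte->sid & 0xff);
		}
		return false;
	}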
409static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) 508static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
410{ 509{
411 u64 addr; 510 u64 addr;
412 u32 cmd, sts; 511 u32 sts;
413 unsigned long flags; 512 unsigned long flags;
414 513
415 addr = virt_to_phys((void *)iommu->ir_table->base); 514 addr = virt_to_phys((void *)iommu->ir_table->base);
@@ -420,9 +519,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
420 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); 519 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
421 520
422 /* Set interrupt-remapping table pointer */ 521 /* Set interrupt-remapping table pointer */
423 cmd = iommu->gcmd | DMA_GCMD_SIRTP;
424 iommu->gcmd |= DMA_GCMD_SIRTP; 522 iommu->gcmd |= DMA_GCMD_SIRTP;
425 writel(cmd, iommu->reg + DMAR_GCMD_REG); 523 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
426 524
427 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 525 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
428 readl, (sts & DMA_GSTS_IRTPS), sts); 526 readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -437,9 +535,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
437 spin_lock_irqsave(&iommu->register_lock, flags); 535 spin_lock_irqsave(&iommu->register_lock, flags);
438 536
439 /* Enable interrupt-remapping */ 537 /* Enable interrupt-remapping */
440 cmd = iommu->gcmd | DMA_GCMD_IRE;
441 iommu->gcmd |= DMA_GCMD_IRE; 538 iommu->gcmd |= DMA_GCMD_IRE;
442 writel(cmd, iommu->reg + DMAR_GCMD_REG); 539 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
443 540
444 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 541 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
445 readl, (sts & DMA_GSTS_IRES), sts); 542 readl, (sts & DMA_GSTS_IRES), sts);
@@ -614,6 +711,35 @@ error:
614 return -1; 711 return -1;
615} 712}
616 713
714static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
715 struct intel_iommu *iommu)
716{
717 struct acpi_dmar_pci_path *path;
718 u8 bus;
719 int count;
720
721 bus = scope->bus;
722 path = (struct acpi_dmar_pci_path *)(scope + 1);
723 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
724 / sizeof(struct acpi_dmar_pci_path);
725
726 while (--count > 0) {
727 /*
728 * Access PCI directly because the PCI
729 * subsystem isn't initialized yet.
730 */
731 bus = read_pci_config_byte(bus, path->dev, path->fn,
732 PCI_SECONDARY_BUS);
733 path++;
734 }
735
736 ir_ioapic[ir_ioapic_num].bus = bus;
737 ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
738 ir_ioapic[ir_ioapic_num].iommu = iommu;
739 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
740 ir_ioapic_num++;
741}
742
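ir_parse_one_ioapic_scope() resolves the IOAPIC's requester ID by walking the DMAR device-scope path with raw config-space reads (the PCI core is not up yet): each hop reads the bridge's secondary bus number, and the final (bus, devfn) pair is what set_ioapic_sid() above packs into the SID. The packing, shown here for reference:

	/* Requester-ID layout used by the SID checks:
	 *   sid[15:8] = bus, sid[7:0] = devfn (dev in bits 7:3, fn in 2:0) */
	static inline u16 example_sid(u8 bus, u8 devfn)
	{
		return ((u16)bus << 8) | devfn;
	}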
617static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, 743static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
618 struct intel_iommu *iommu) 744 struct intel_iommu *iommu)
619{ 745{
@@ -638,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
638 " 0x%Lx\n", scope->enumeration_id, 764 " 0x%Lx\n", scope->enumeration_id,
639 drhd->address); 765 drhd->address);
640 766
641 ir_ioapic[ir_ioapic_num].iommu = iommu; 767 ir_parse_one_ioapic_scope(scope, iommu);
642 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
643 ir_ioapic_num++;
644 } 768 }
645 start += scope->length; 769 start += scope->length;
646 } 770 }
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
index ca48f0df8ac9..63a263c18415 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/pci/intr_remapping.h
@@ -3,6 +3,8 @@
3struct ioapic_scope { 3struct ioapic_scope {
4 struct intel_iommu *iommu; 4 struct intel_iommu *iommu;
5 unsigned int id; 5 unsigned int id;
6 unsigned int bus; /* PCI bus number */
7 unsigned int devfn; /* PCI devfn number */
6}; 8};
7 9
8#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) 10#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b497daab3d4a..e3a87210e947 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * PCI Express I/O Virtualization (IOV) support. 6 * PCI Express I/O Virtualization (IOV) support.
7 * Single Root IOV 1.0 7 * Single Root IOV 1.0
8 * Address Translation Service 1.0
8 */ 9 */
9 10
10#include <linux/pci.h> 11#include <linux/pci.h>
@@ -110,7 +111,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
110 } 111 }
111 112
112 if (reset) 113 if (reset)
113 pci_execute_reset_function(virtfn); 114 __pci_reset_function(virtfn);
114 115
115 pci_device_add(virtfn, virtfn->bus); 116 pci_device_add(virtfn, virtfn->bus);
116 mutex_unlock(&iov->dev->sriov->lock); 117 mutex_unlock(&iov->dev->sriov->lock);
@@ -164,7 +165,7 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
164 165
165 if (reset) { 166 if (reset) {
166 device_release_driver(&virtfn->dev); 167 device_release_driver(&virtfn->dev);
167 pci_execute_reset_function(virtfn); 168 __pci_reset_function(virtfn);
168 } 169 }
169 170
170 sprintf(buf, "virtfn%u", id); 171 sprintf(buf, "virtfn%u", id);
@@ -487,13 +488,15 @@ found:
487 iov->self = dev; 488 iov->self = dev;
488 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 489 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
489 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 490 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
491 if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
492 iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
490 493
491 if (pdev) 494 if (pdev)
492 iov->dev = pci_dev_get(pdev); 495 iov->dev = pci_dev_get(pdev);
493 else { 496 else
494 iov->dev = dev; 497 iov->dev = dev;
495 mutex_init(&iov->lock); 498
496 } 499 mutex_init(&iov->lock);
497 500
498 dev->sriov = iov; 501 dev->sriov = iov;
499 dev->is_physfn = 1; 502 dev->is_physfn = 1;
@@ -513,11 +516,11 @@ static void sriov_release(struct pci_dev *dev)
513{ 516{
514 BUG_ON(dev->sriov->nr_virtfn); 517 BUG_ON(dev->sriov->nr_virtfn);
515 518
516 if (dev == dev->sriov->dev) 519 if (dev != dev->sriov->dev)
517 mutex_destroy(&dev->sriov->lock);
518 else
519 pci_dev_put(dev->sriov->dev); 520 pci_dev_put(dev->sriov->dev);
520 521
522 mutex_destroy(&dev->sriov->lock);
523
521 kfree(dev->sriov); 524 kfree(dev->sriov);
522 dev->sriov = NULL; 525 dev->sriov = NULL;
523} 526}
@@ -679,3 +682,145 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
679 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE; 682 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
680} 683}
681EXPORT_SYMBOL_GPL(pci_sriov_migration); 684EXPORT_SYMBOL_GPL(pci_sriov_migration);
685
686static int ats_alloc_one(struct pci_dev *dev, int ps)
687{
688 int pos;
689 u16 cap;
690 struct pci_ats *ats;
691
692 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
693 if (!pos)
694 return -ENODEV;
695
696 ats = kzalloc(sizeof(*ats), GFP_KERNEL);
697 if (!ats)
698 return -ENOMEM;
699
700 ats->pos = pos;
701 ats->stu = ps;
702 pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
703 ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
704 PCI_ATS_MAX_QDEP;
705 dev->ats = ats;
706
707 return 0;
708}
709
710static void ats_free_one(struct pci_dev *dev)
711{
712 kfree(dev->ats);
713 dev->ats = NULL;
714}
715
716/**
717 * pci_enable_ats - enable the ATS capability
718 * @dev: the PCI device
719 * @ps: the IOMMU page shift
720 *
721 * Returns 0 on success, or negative on failure.
722 */
723int pci_enable_ats(struct pci_dev *dev, int ps)
724{
725 int rc;
726 u16 ctrl;
727
728 BUG_ON(dev->ats && dev->ats->is_enabled);
729
730 if (ps < PCI_ATS_MIN_STU)
731 return -EINVAL;
732
733 if (dev->is_physfn || dev->is_virtfn) {
734 struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
735
736 mutex_lock(&pdev->sriov->lock);
737 if (pdev->ats)
738 rc = pdev->ats->stu == ps ? 0 : -EINVAL;
739 else
740 rc = ats_alloc_one(pdev, ps);
741
742 if (!rc)
743 pdev->ats->ref_cnt++;
744 mutex_unlock(&pdev->sriov->lock);
745 if (rc)
746 return rc;
747 }
748
749 if (!dev->is_physfn) {
750 rc = ats_alloc_one(dev, ps);
751 if (rc)
752 return rc;
753 }
754
755 ctrl = PCI_ATS_CTRL_ENABLE;
756 if (!dev->is_virtfn)
757 ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
758 pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
759
760 dev->ats->is_enabled = 1;
761
762 return 0;
763}
764
765/**
766 * pci_disable_ats - disable the ATS capability
767 * @dev: the PCI device
768 */
769void pci_disable_ats(struct pci_dev *dev)
770{
771 u16 ctrl;
772
773 BUG_ON(!dev->ats || !dev->ats->is_enabled);
774
775 pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
776 ctrl &= ~PCI_ATS_CTRL_ENABLE;
777 pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
778
779 dev->ats->is_enabled = 0;
780
781 if (dev->is_physfn || dev->is_virtfn) {
782 struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
783
784 mutex_lock(&pdev->sriov->lock);
785 pdev->ats->ref_cnt--;
786 if (!pdev->ats->ref_cnt)
787 ats_free_one(pdev);
788 mutex_unlock(&pdev->sriov->lock);
789 }
790
791 if (!dev->is_physfn)
792 ats_free_one(dev);
793}
794
795/**
796 * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
797 * @dev: the PCI device
798 *
799 * Returns the queue depth on success, or negative on failure.
800 *
801 * The ATS spec uses 0 in the Invalidate Queue Depth field to
802 * indicate that the function can accept 32 Invalidate Requests.
803 * But here we use the `real' values (i.e. 1~32) for the Queue
804 * Depth; and 0 indicates the function shares the Queue with
805 * other functions (doesn't exclusively own a Queue).
806 */
807int pci_ats_queue_depth(struct pci_dev *dev)
808{
809 int pos;
810 u16 cap;
811
812 if (dev->is_virtfn)
813 return 0;
814
815 if (dev->ats)
816 return dev->ats->qdep;
817
818 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
819 if (!pos)
820 return -ENODEV;
821
822 pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
823
824 return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
825 PCI_ATS_MAX_QDEP;
826}
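
A hypothetical caller of the new ATS interface, e.g. an IOMMU driver enabling the capability with a 4KB smallest translation unit (PAGE_SHIFT == 12 satisfies PCI_ATS_MIN_STU); a sketch only:

	static int example_setup_ats(struct pci_dev *pdev)
	{
		int qdep, rc;

		rc = pci_enable_ats(pdev, PAGE_SHIFT);
		if (rc)
			return rc;	/* no ATS capability, or STU mismatch */

		/* size the invalidation queue for this function */
		qdep = pci_ats_queue_depth(pdev);
		dev_info(&pdev->dev, "ATS enabled, queue depth %d\n", qdep);
		return 0;
	}

On teardown the driver would call pci_disable_ats(pdev), which also drops the shared reference held for SR-IOV functions.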
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 362773247fbf..d9f06fbfa0bf 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
75} 75}
76#endif 76#endif
77 77
78static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) 78static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
79{ 79{
80 u16 control; 80 u16 control;
81 81
82 if (pos) { 82 BUG_ON(!pos);
83 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
84 control &= ~PCI_MSI_FLAGS_ENABLE;
85 if (enable)
86 control |= PCI_MSI_FLAGS_ENABLE;
87 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
88 }
89}
90 83
91static void msi_set_enable(struct pci_dev *dev, int enable) 84 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
92{ 85 control &= ~PCI_MSI_FLAGS_ENABLE;
93 __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); 86 if (enable)
87 control |= PCI_MSI_FLAGS_ENABLE;
88 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
94} 89}
95 90
96static void msix_set_enable(struct pci_dev *dev, int enable) 91static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,9 +126,6 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
131 * mask all MSI interrupts by clearing the MSI enable bit does not work 126 * mask all MSI interrupts by clearing the MSI enable bit does not work
132 * reliably as devices without an INTx disable bit will then generate a 127 * reliably as devices without an INTx disable bit will then generate a
133 * level IRQ which will never be cleared. 128 * level IRQ which will never be cleared.
134 *
135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
136 * doesn't support MSI masking.
137 */ 129 */
138static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 130static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
139{ 131{
@@ -303,7 +295,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
303 pos = entry->msi_attrib.pos; 295 pos = entry->msi_attrib.pos;
304 296
305 pci_intx_for_msi(dev, 0); 297 pci_intx_for_msi(dev, 0);
306 msi_set_enable(dev, 0); 298 msi_set_enable(dev, pos, 0);
307 write_msi_msg(dev->irq, &entry->msg); 299 write_msi_msg(dev->irq, &entry->msg);
308 300
309 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 301 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +313,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
321 313
322 if (!dev->msix_enabled) 314 if (!dev->msix_enabled)
323 return; 315 return;
316 BUG_ON(list_empty(&dev->msi_list));
317 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
318 pos = entry->msi_attrib.pos;
319 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
324 320
325 /* route the table */ 321 /* route the table */
326 pci_intx_for_msi(dev, 0); 322 pci_intx_for_msi(dev, 0);
327 msix_set_enable(dev, 0); 323 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
324 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
328 325
329 list_for_each_entry(entry, &dev->msi_list, list) { 326 list_for_each_entry(entry, &dev->msi_list, list) {
330 write_msi_msg(entry->irq, &entry->msg); 327 write_msi_msg(entry->irq, &entry->msg);
331 msix_mask_irq(entry, entry->masked); 328 msix_mask_irq(entry, entry->masked);
332 } 329 }
333 330
334 BUG_ON(list_empty(&dev->msi_list));
335 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
336 pos = entry->msi_attrib.pos;
337 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
338 control &= ~PCI_MSIX_FLAGS_MASKALL; 331 control &= ~PCI_MSIX_FLAGS_MASKALL;
339 control |= PCI_MSIX_FLAGS_ENABLE;
340 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 332 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
341} 333}
342 334
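__pci_restore_msix_state() now brackets the table rewrite with the function-wide mask: it sets ENABLE together with MASKALL so the table is writable while no vector can fire, rewrites every message and per-vector mask, then clears MASKALL. A minimal sketch of that bracket as a hypothetical helper (not part of this patch):

	static void example_msix_maskall(struct pci_dev *dev, unsigned pos, int on)
	{
		u16 ctrl;

		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &ctrl);
		if (on)
			ctrl |= PCI_MSIX_FLAGS_MASKALL;
		else
			ctrl &= ~PCI_MSIX_FLAGS_MASKALL;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, ctrl);
	}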
@@ -365,9 +357,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
365 u16 control; 357 u16 control;
366 unsigned mask; 358 unsigned mask;
367 359
368 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
369
370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 360 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
361 msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
362
371 pci_read_config_word(dev, msi_control_reg(pos), &control); 363 pci_read_config_word(dev, msi_control_reg(pos), &control);
372 /* MSI Entry Initialization */ 364 /* MSI Entry Initialization */
373 entry = alloc_msi_entry(dev); 365 entry = alloc_msi_entry(dev);
@@ -381,7 +373,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
381 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 373 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
382 entry->msi_attrib.pos = pos; 374 entry->msi_attrib.pos = pos;
383 375
384 entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 376 entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
385 /* All MSIs are unmasked by default, Mask them all */ 377 /* All MSIs are unmasked by default, Mask them all */
386 if (entry->msi_attrib.maskbit) 378 if (entry->msi_attrib.maskbit)
387 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); 379 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -399,7 +391,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
399 391
400 /* Set MSI enabled bits */ 392 /* Set MSI enabled bits */
401 pci_intx_for_msi(dev, 0); 393 pci_intx_for_msi(dev, 0);
402 msi_set_enable(dev, 1); 394 msi_set_enable(dev, pos, 1);
403 dev->msi_enabled = 1; 395 dev->msi_enabled = 1;
404 396
405 dev->irq = entry->irq; 397 dev->irq = entry->irq;
@@ -427,11 +419,14 @@ static int msix_capability_init(struct pci_dev *dev,
427 u8 bir; 419 u8 bir;
428 void __iomem *base; 420 void __iomem *base;
429 421
430 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
431
432 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 422 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
423 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
424
425 /* Ensure MSI-X is disabled while it is set up */
426 control &= ~PCI_MSIX_FLAGS_ENABLE;
427 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
428
433 /* Request & Map MSI-X table region */ 429 /* Request & Map MSI-X table region */
434 pci_read_config_word(dev, msi_control_reg(pos), &control);
435 nr_entries = multi_msix_capable(control); 430 nr_entries = multi_msix_capable(control);
436 431
437 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 432 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,7 +437,6 @@ static int msix_capability_init(struct pci_dev *dev,
442 if (base == NULL) 437 if (base == NULL)
443 return -ENOMEM; 438 return -ENOMEM;
444 439
445 /* MSI-X Table Initialization */
446 for (i = 0; i < nvec; i++) { 440 for (i = 0; i < nvec; i++) {
447 entry = alloc_msi_entry(dev); 441 entry = alloc_msi_entry(dev);
448 if (!entry) 442 if (!entry)
@@ -455,7 +449,6 @@ static int msix_capability_init(struct pci_dev *dev,
455 entry->msi_attrib.default_irq = dev->irq; 449 entry->msi_attrib.default_irq = dev->irq;
456 entry->msi_attrib.pos = pos; 450 entry->msi_attrib.pos = pos;
457 entry->mask_base = base; 451 entry->mask_base = base;
458 msix_mask_irq(entry, 1);
459 452
460 list_add_tail(&entry->list, &dev->msi_list); 453 list_add_tail(&entry->list, &dev->msi_list);
461 } 454 }
@@ -480,22 +473,31 @@ static int msix_capability_init(struct pci_dev *dev,
480 return ret; 473 return ret;
481 } 474 }
482 475
476 /*
477 * Some devices require MSI-X to be enabled before we can touch the
478 * MSI-X registers. We need to mask all the vectors to prevent
479 * interrupts coming in before they're fully set up.
480 */
481 control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
482 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
483
483 i = 0; 484 i = 0;
484 list_for_each_entry(entry, &dev->msi_list, list) { 485 list_for_each_entry(entry, &dev->msi_list, list) {
485 entries[i].vector = entry->irq; 486 entries[i].vector = entry->irq;
486 set_irq_msi(entry->irq, entry); 487 set_irq_msi(entry->irq, entry);
488 j = entries[i].entry;
489 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
490 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
491 msix_mask_irq(entry, 1);
487 i++; 492 i++;
488 } 493 }
489 /* Set MSI-X enabled bits */ 494
495 /* Set MSI-X enabled bits and unmask the function */
490 pci_intx_for_msi(dev, 0); 496 pci_intx_for_msi(dev, 0);
491 msix_set_enable(dev, 1);
492 dev->msix_enabled = 1; 497 dev->msix_enabled = 1;
493 498
494 list_for_each_entry(entry, &dev->msi_list, list) { 499 control &= ~PCI_MSIX_FLAGS_MASKALL;
495 int vector = entry->msi_attrib.entry_nr; 500 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
496 entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
497 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
498 }
499 501
500 return 0; 502 return 0;
501} 503}
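
msix_capability_init() follows the same discipline: some devices ignore MSI-X table writes until the capability is enabled, so it enables with MASKALL set, reads back each vector-control word to seed entry->masked, masks every vector, and only then clears MASKALL. Per-vector masking operates on the memory-mapped table; a sketch under the assumed 16-byte entry layout (vector control at offset 12, mask in bit 0):

	static void example_msix_entry_mask(void __iomem *base, int nr, int mask)
	{
		void __iomem *ctrl = base + nr * PCI_MSIX_ENTRY_SIZE +
				     PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		u32 v = readl(ctrl);

		writel(mask ? (v | 1) : (v & ~1u), ctrl);
	}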
@@ -596,17 +598,20 @@ void pci_msi_shutdown(struct pci_dev *dev)
596 struct msi_desc *desc; 598 struct msi_desc *desc;
597 u32 mask; 599 u32 mask;
598 u16 ctrl; 600 u16 ctrl;
601 unsigned pos;
599 602
600 if (!pci_msi_enable || !dev || !dev->msi_enabled) 603 if (!pci_msi_enable || !dev || !dev->msi_enabled)
601 return; 604 return;
602 605
603 msi_set_enable(dev, 0); 606 BUG_ON(list_empty(&dev->msi_list));
607 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
608 pos = desc->msi_attrib.pos;
609
610 msi_set_enable(dev, pos, 0);
604 pci_intx_for_msi(dev, 1); 611 pci_intx_for_msi(dev, 1);
605 dev->msi_enabled = 0; 612 dev->msi_enabled = 0;
606 613
607 BUG_ON(list_empty(&dev->msi_list)); 614 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
608 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
609 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
610 mask = msi_capable_mask(ctrl); 615 mask = msi_capable_mask(ctrl);
611 msi_mask_irq(desc, mask, ~mask); 616 msi_mask_irq(desc, mask, ~mask);
612 617
@@ -648,10 +653,7 @@ static int msi_free_irqs(struct pci_dev* dev)
648 653
649 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 654 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
650 if (entry->msi_attrib.is_msix) { 655 if (entry->msi_attrib.is_msix) {
651 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 656 msix_mask_irq(entry, 1);
652 * PCI_MSIX_ENTRY_SIZE
653 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
654
655 if (list_is_last(&entry->list, &dev->msi_list)) 657 if (list_is_last(&entry->list, &dev->msi_list))
656 iounmap(entry->mask_base); 658 iounmap(entry->mask_base);
657 } 659 }
@@ -691,8 +693,8 @@ int pci_msix_table_size(struct pci_dev *dev)
691 * indicates the successful configuration of MSI-X capability structure 693 * indicates the successful configuration of MSI-X capability structure
692 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 694 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
693 * Or a return of > 0 indicates that driver request is exceeding the number 695 * Or a return of > 0 indicates that driver request is exceeding the number
694 * of irqs available. Driver should use the returned value to re-send 696 * of irqs or MSI-X vectors available. Driver should use the returned value to
695 * its request. 697 * re-send its request.
696 **/ 698 **/
697int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 699int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
698{ 700{
@@ -708,7 +710,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
708 710
709 nr_entries = pci_msix_table_size(dev); 711 nr_entries = pci_msix_table_size(dev);
710 if (nvec > nr_entries) 712 if (nvec > nr_entries)
711 return -EINVAL; 713 return nr_entries;
712 714
713 /* Check for any invalid entries */ 715 /* Check for any invalid entries */
714 for (i = 0; i < nvec; i++) { 716 for (i = 0; i < nvec; i++) {
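With pci_enable_msix() now returning the available vector count instead of -EINVAL, a driver can negotiate downward. A hypothetical probe-time caller:

	static int example_request_msix(struct pci_dev *pdev,
					struct msix_entry *ents, int nvec)
	{
		int i, rc;

		for (i = 0; i < nvec; i++)
			ents[i].entry = i;
	retry:
		rc = pci_enable_msix(pdev, ents, nvec);
		if (rc > 0 && rc < nvec) {
			nvec = rc;	/* fewer vectors available; retry */
			goto retry;
		}
		return rc;	/* 0 on success, negative on hard failure */
	}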
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 71f4df2ef654..a0662842550b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,21 +16,15 @@
16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) 16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) 17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
18#define msi_data_reg(base, is64bit) \ 18#define msi_data_reg(base, is64bit) \
19 ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 ) 19 (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
20#define msi_mask_bits_reg(base, is64bit) \ 20#define msi_mask_reg(base, is64bit) \
21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) 21 (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
23#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 22#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
24#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 23#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
25 24
26#define msix_table_offset_reg(base) (base + 0x04) 25#define msix_table_offset_reg(base) (base + 0x04)
27#define msix_pba_offset_reg(base) (base + 0x08) 26#define msix_pba_offset_reg(base) (base + 0x08)
28#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
29#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
30#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) 27#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
31#define multi_msix_capable msix_table_size 28#define multi_msix_capable(control) msix_table_size((control))
32#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
33#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
34#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
35 29
36#endif /* MSI_H */ 30#endif /* MSI_H */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07bbb9b3b93f..6c93af5ced18 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -485,6 +485,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
485 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 485 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
486 pmcsr |= state; 486 pmcsr |= state;
487 break; 487 break;
488 case PCI_D3hot:
489 case PCI_D3cold:
488 case PCI_UNKNOWN: /* Boot-up */ 490 case PCI_UNKNOWN: /* Boot-up */
489 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 491 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
490 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 492 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
@@ -1208,7 +1210,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1208 * Error code depending on the platform is returned if both the platform and 1210 * Error code depending on the platform is returned if both the platform and
1209 * the native mechanism fail to enable the generation of wake-up events 1211 * the native mechanism fail to enable the generation of wake-up events
1210 */ 1212 */
1211int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 1213int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1212{ 1214{
1213 int error = 0; 1215 int error = 0;
1214 bool pme_done = false; 1216 bool pme_done = false;
@@ -1287,15 +1289,14 @@ pci_power_t pci_target_state(struct pci_dev *dev)
1287 default: 1289 default:
1288 target_state = state; 1290 target_state = state;
1289 } 1291 }
1292 } else if (!dev->pm_cap) {
1293 target_state = PCI_D0;
1290 } else if (device_may_wakeup(&dev->dev)) { 1294 } else if (device_may_wakeup(&dev->dev)) {
1291 /* 1295 /*
1292 * Find the deepest state from which the device can generate 1296 * Find the deepest state from which the device can generate
1293 * wake-up events, make it the target state and enable device 1297 * wake-up events, make it the target state and enable device
1294 * to generate PME#. 1298 * to generate PME#.
1295 */ 1299 */
1296 if (!dev->pm_cap)
1297 return PCI_POWER_ERROR;
1298
1299 if (dev->pme_support) { 1300 if (dev->pme_support) {
1300 while (target_state 1301 while (target_state
1301 && !(dev->pme_support & (1 << target_state))) 1302 && !(dev->pme_support & (1 << target_state)))
@@ -1532,7 +1533,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1532 if (!pin) 1533 if (!pin)
1533 return -1; 1534 return -1;
1534 1535
1535 while (dev->bus->parent) { 1536 while (!pci_is_root_bus(dev->bus)) {
1536 pin = pci_swizzle_interrupt_pin(dev, pin); 1537 pin = pci_swizzle_interrupt_pin(dev, pin);
1537 dev = dev->bus->self; 1538 dev = dev->bus->self;
1538 } 1539 }
@@ -1552,7 +1553,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1552{ 1553{
1553 u8 pin = *pinp; 1554 u8 pin = *pinp;
1554 1555
1555 while (dev->bus->parent) { 1556 while (!pci_is_root_bus(dev->bus)) {
1556 pin = pci_swizzle_interrupt_pin(dev, pin); 1557 pin = pci_swizzle_interrupt_pin(dev, pin);
1557 dev = dev->bus->self; 1558 dev = dev->bus->self;
1558 } 1559 }
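
Both loops above rely on the standard bridge swizzle: each conventional PCI-to-PCI bridge rotates INTA-INTD by the device number of the child. pci_swizzle_interrupt_pin() is assumed to implement the usual formula (pins are 1-based, INTA = 1):

	static inline u8 example_swizzle(u8 devfn, u8 pin)
	{
		return (((pin - 1) + PCI_SLOT(devfn)) % 4) + 1;
	}

Walking this up to the root bus, as the rewritten loops do via pci_is_root_bus(), yields the pin as seen at the host bridge.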
@@ -2058,111 +2059,177 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2058EXPORT_SYMBOL(pci_set_dma_seg_boundary); 2059EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2059#endif 2060#endif
2060 2061
2061static int __pcie_flr(struct pci_dev *dev, int probe) 2062static int pcie_flr(struct pci_dev *dev, int probe)
2062{ 2063{
2063 u16 status; 2064 int i;
2065 int pos;
2064 u32 cap; 2066 u32 cap;
2065 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); 2067 u16 status;
2066 2068
2067 if (!exppos) 2069 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
2070 if (!pos)
2068 return -ENOTTY; 2071 return -ENOTTY;
2069 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); 2072
2073 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2070 if (!(cap & PCI_EXP_DEVCAP_FLR)) 2074 if (!(cap & PCI_EXP_DEVCAP_FLR))
2071 return -ENOTTY; 2075 return -ENOTTY;
2072 2076
2073 if (probe) 2077 if (probe)
2074 return 0; 2078 return 0;
2075 2079
2076 pci_block_user_cfg_access(dev);
2077
2078 /* Wait for Transaction Pending bit clean */ 2080 /* Wait for Transaction Pending bit clean */
2079 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2081 for (i = 0; i < 4; i++) {
2080 if (!(status & PCI_EXP_DEVSTA_TRPND)) 2082 if (i)
2081 goto transaction_done; 2083 msleep((1 << (i - 1)) * 100);
2082 2084
2083 msleep(100); 2085 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2084 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2086 if (!(status & PCI_EXP_DEVSTA_TRPND))
2085 if (!(status & PCI_EXP_DEVSTA_TRPND)) 2087 goto clear;
2086 goto transaction_done; 2088 }
2087 2089
2088 dev_info(&dev->dev, "Busy after 100ms while trying to reset; " 2090 dev_err(&dev->dev, "transaction is not cleared; "
2089 "sleeping for 1 second\n"); 2091 "proceeding with reset anyway\n");
2090 ssleep(1); 2092
2091 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2093clear:
2092 if (status & PCI_EXP_DEVSTA_TRPND) 2094 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
2093 dev_info(&dev->dev, "Still busy after 1s; "
2094 "proceeding with reset anyway\n");
2095
2096transaction_done:
2097 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
2098 PCI_EXP_DEVCTL_BCR_FLR); 2095 PCI_EXP_DEVCTL_BCR_FLR);
2099 mdelay(100); 2096 msleep(100);
2100 2097
2101 pci_unblock_user_cfg_access(dev);
2102 return 0; 2098 return 0;
2103} 2099}
2104 2100
2105static int __pci_af_flr(struct pci_dev *dev, int probe) 2101static int pci_af_flr(struct pci_dev *dev, int probe)
2106{ 2102{
2107 int cappos = pci_find_capability(dev, PCI_CAP_ID_AF); 2103 int i;
2108 u8 status; 2104 int pos;
2109 u8 cap; 2105 u8 cap;
2106 u8 status;
2110 2107
2111 if (!cappos) 2108 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2109 if (!pos)
2112 return -ENOTTY; 2110 return -ENOTTY;
2113 pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap); 2111
2112 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2114 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 2113 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2115 return -ENOTTY; 2114 return -ENOTTY;
2116 2115
2117 if (probe) 2116 if (probe)
2118 return 0; 2117 return 0;
2119 2118
2120 pci_block_user_cfg_access(dev);
2121
2122 /* Wait for Transaction Pending bit clean */ 2119 /* Wait for Transaction Pending bit clean */
2123 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2120 for (i = 0; i < 4; i++) {
2124 if (!(status & PCI_AF_STATUS_TP)) 2121 if (i)
2125 goto transaction_done; 2122 msleep((1 << (i - 1)) * 100);
2123
2124 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2125 if (!(status & PCI_AF_STATUS_TP))
2126 goto clear;
2127 }
2126 2128
2129 dev_err(&dev->dev, "transaction is not cleared; "
2130 "proceeding with reset anyway\n");
2131
2132clear:
2133 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2127 msleep(100); 2134 msleep(100);
2128 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2135
2129 if (!(status & PCI_AF_STATUS_TP))
2130 goto transaction_done;
2131
2132 dev_info(&dev->dev, "Busy after 100ms while trying to"
2133 " reset; sleeping for 1 second\n");
2134 ssleep(1);
2135 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2136 if (status & PCI_AF_STATUS_TP)
2137 dev_info(&dev->dev, "Still busy after 1s; "
2138 "proceeding with reset anyway\n");
2139
2140transaction_done:
2141 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2142 mdelay(100);
2143
2144 pci_unblock_user_cfg_access(dev);
2145 return 0; 2136 return 0;
2146} 2137}
2147 2138
2148static int __pci_reset_function(struct pci_dev *pdev, int probe) 2139static int pci_pm_reset(struct pci_dev *dev, int probe)
2149{ 2140{
2150 int res; 2141 u16 csr;
2142
2143 if (!dev->pm_cap)
2144 return -ENOTTY;
2151 2145
2152 res = __pcie_flr(pdev, probe); 2146 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2153 if (res != -ENOTTY) 2147 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2154 return res; 2148 return -ENOTTY;
2155 2149
2156 res = __pci_af_flr(pdev, probe); 2150 if (probe)
2157 if (res != -ENOTTY) 2151 return 0;
2158 return res;
2159 2152
2160 return res; 2153 if (dev->current_state != PCI_D0)
2154 return -EINVAL;
2155
2156 csr &= ~PCI_PM_CTRL_STATE_MASK;
2157 csr |= PCI_D3hot;
2158 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2159 msleep(pci_pm_d3_delay);
2160
2161 csr &= ~PCI_PM_CTRL_STATE_MASK;
2162 csr |= PCI_D0;
2163 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2164 msleep(pci_pm_d3_delay);
2165
2166 return 0;
2167}
2168
2169static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2170{
2171 u16 ctrl;
2172 struct pci_dev *pdev;
2173
2174 if (dev->subordinate)
2175 return -ENOTTY;
2176
2177 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2178 if (pdev != dev)
2179 return -ENOTTY;
2180
2181 if (probe)
2182 return 0;
2183
2184 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2185 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2186 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2187 msleep(100);
2188
2189 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2190 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2191 msleep(100);
2192
2193 return 0;
2194}
2195
2196static int pci_dev_reset(struct pci_dev *dev, int probe)
2197{
2198 int rc;
2199
2200 might_sleep();
2201
2202 if (!probe) {
2203 pci_block_user_cfg_access(dev);
2204 /* block PM suspend, driver probe, etc. */
2205 down(&dev->dev.sem);
2206 }
2207
2208 rc = pcie_flr(dev, probe);
2209 if (rc != -ENOTTY)
2210 goto done;
2211
2212 rc = pci_af_flr(dev, probe);
2213 if (rc != -ENOTTY)
2214 goto done;
2215
2216 rc = pci_pm_reset(dev, probe);
2217 if (rc != -ENOTTY)
2218 goto done;
2219
2220 rc = pci_parent_bus_reset(dev, probe);
2221done:
2222 if (!probe) {
2223 up(&dev->dev.sem);
2224 pci_unblock_user_cfg_access(dev);
2225 }
2226
2227 return rc;
2161} 2228}
2162 2229
2163/** 2230/**
2164 * pci_execute_reset_function() - Reset a PCI device function 2231 * __pci_reset_function - reset a PCI device function
2165 * @dev: Device function to reset 2232 * @dev: PCI device to reset
2166 * 2233 *
2167 * Some devices allow an individual function to be reset without affecting 2234 * Some devices allow an individual function to be reset without affecting
2168 * other functions in the same device. The PCI device must be responsive 2235 * other functions in the same device. The PCI device must be responsive
@@ -2174,18 +2241,18 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
2174 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 2241 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2175 * etc. 2242 * etc.
2176 * 2243 *
2177 * Returns 0 if the device function was successfully reset or -ENOTTY if the 2244 * Returns 0 if the device function was successfully reset or negative if the
2178 * device doesn't support resetting a single function. 2245 * device doesn't support resetting a single function.
2179 */ 2246 */
2180int pci_execute_reset_function(struct pci_dev *dev) 2247int __pci_reset_function(struct pci_dev *dev)
2181{ 2248{
2182 return __pci_reset_function(dev, 0); 2249 return pci_dev_reset(dev, 0);
2183} 2250}
2184EXPORT_SYMBOL_GPL(pci_execute_reset_function); 2251EXPORT_SYMBOL_GPL(__pci_reset_function);
2185 2252
2186/** 2253/**
2187 * pci_reset_function() - quiesce and reset a PCI device function 2254 * pci_reset_function - quiesce and reset a PCI device function
2188 * @dev: Device function to reset 2255 * @dev: PCI device to reset
2189 * 2256 *
2190 * Some devices allow an individual function to be reset without affecting 2257 * Some devices allow an individual function to be reset without affecting
2191 * other functions in the same device. The PCI device must be responsive 2258 * other functions in the same device. The PCI device must be responsive
@@ -2193,32 +2260,33 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
2193 * 2260 *
2194 * This function does not just reset the PCI portion of a device, but 2261 * This function does not just reset the PCI portion of a device, but
2195 * clears all the state associated with the device. This function differs 2262 * clears all the state associated with the device. This function differs
2196 * from pci_execute_reset_function in that it saves and restores device state 2263 * from __pci_reset_function in that it saves and restores device state
2197 * over the reset. 2264 * over the reset.
2198 * 2265 *
2199 * Returns 0 if the device function was successfully reset or -ENOTTY if the 2266 * Returns 0 if the device function was successfully reset or negative if the
2200 * device doesn't support resetting a single function. 2267 * device doesn't support resetting a single function.
2201 */ 2268 */
2202int pci_reset_function(struct pci_dev *dev) 2269int pci_reset_function(struct pci_dev *dev)
2203{ 2270{
2204 int r = __pci_reset_function(dev, 1); 2271 int rc;
2205 2272
2206 if (r < 0) 2273 rc = pci_dev_reset(dev, 1);
2207 return r; 2274 if (rc)
2275 return rc;
2208 2276
2209 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
2210 disable_irq(dev->irq);
2211 pci_save_state(dev); 2277 pci_save_state(dev);
2212 2278
2279 /*
2280 * both INTx and MSI are disabled after the Interrupt Disable bit
2281 * is set and the Bus Master bit is cleared.
2282 */
2213 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 2283 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2214 2284
2215 r = pci_execute_reset_function(dev); 2285 rc = pci_dev_reset(dev, 0);
2216 2286
2217 pci_restore_state(dev); 2287 pci_restore_state(dev);
2218 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
2219 enable_irq(dev->irq);
2220 2288
2221 return r; 2289 return rc;
2222} 2290}
2223EXPORT_SYMBOL_GPL(pci_reset_function); 2291EXPORT_SYMBOL_GPL(pci_reset_function);
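
A hypothetical recovery path in a driver's error handler, to show the division of labor: pci_reset_function() saves and restores config state around the reset, so the caller only has to quiesce its own I/O first:

	static int example_recover(struct pci_dev *pdev)
	{
		int rc;

		/* stop DMA and interrupt handling in the driver first */
		rc = pci_reset_function(pdev);
		if (rc)
			dev_err(&pdev->dev, "function reset failed: %d\n", rc);
		return rc;
	}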
2224 2292
@@ -2591,6 +2659,8 @@ static int __init pci_setup(char *str)
2591 } else if (!strncmp(str, "resource_alignment=", 19)) { 2659 } else if (!strncmp(str, "resource_alignment=", 19)) {
2592 pci_set_resource_alignment_param(str + 19, 2660 pci_set_resource_alignment_param(str + 19,
2593 strlen(str + 19)); 2661 strlen(str + 19));
2662 } else if (!strncmp(str, "ecrc=", 5)) {
2663 pcie_ecrc_get_policy(str + 5);
2594 } else { 2664 } else {
2595 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2665 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2596 str); 2666 str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d03f6b99f292..f73bcbedf37c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -229,6 +229,15 @@ struct pci_sriov {
229 u8 __iomem *mstate; /* VF Migration State Array */ 229 u8 __iomem *mstate; /* VF Migration State Array */
230}; 230};
231 231
232/* Address Translation Service */
233struct pci_ats {
234 int pos; /* capability position */
235 int stu; /* Smallest Translation Unit */
236 int qdep; /* Invalidate Queue Depth */
237 int ref_cnt; /* Physical Function reference count */
238 int is_enabled:1; /* Enable bit is set */
239};
240
232#ifdef CONFIG_PCI_IOV 241#ifdef CONFIG_PCI_IOV
233extern int pci_iov_init(struct pci_dev *dev); 242extern int pci_iov_init(struct pci_dev *dev);
234extern void pci_iov_release(struct pci_dev *dev); 243extern void pci_iov_release(struct pci_dev *dev);
@@ -236,6 +245,20 @@ extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
236 enum pci_bar_type *type); 245 enum pci_bar_type *type);
237extern void pci_restore_iov_state(struct pci_dev *dev); 246extern void pci_restore_iov_state(struct pci_dev *dev);
238extern int pci_iov_bus_range(struct pci_bus *bus); 247extern int pci_iov_bus_range(struct pci_bus *bus);
248
249extern int pci_enable_ats(struct pci_dev *dev, int ps);
250extern void pci_disable_ats(struct pci_dev *dev);
251extern int pci_ats_queue_depth(struct pci_dev *dev);
252/**
253 * pci_ats_enabled - query the ATS status
254 * @dev: the PCI device
255 *
256 * Returns 1 if ATS capability is enabled, or 0 if not.
257 */
258static inline int pci_ats_enabled(struct pci_dev *dev)
259{
260 return dev->ats && dev->ats->is_enabled;
261}
239#else 262#else
240static inline int pci_iov_init(struct pci_dev *dev) 263static inline int pci_iov_init(struct pci_dev *dev)
241{ 264{
@@ -257,6 +280,22 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
257{ 280{
258 return 0; 281 return 0;
259} 282}
283
284static inline int pci_enable_ats(struct pci_dev *dev, int ps)
285{
286 return -ENODEV;
287}
288static inline void pci_disable_ats(struct pci_dev *dev)
289{
290}
291static inline int pci_ats_queue_depth(struct pci_dev *dev)
292{
293 return -ENODEV;
294}
295static inline int pci_ats_enabled(struct pci_dev *dev)
296{
297 return 0;
298}
260#endif /* CONFIG_PCI_IOV */ 299#endif /* CONFIG_PCI_IOV */
261 300
262#endif /* DRIVERS_PCI_H */ 301#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa13..50e94e02378a 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER
10 This enables PCI Express Root Port Advanced Error Reporting 10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) driver support. Error reporting messages sent to Root 11 (AER) driver support. Error reporting messages sent to Root
12 Port will be handled by PCI Express AER driver. 12 Port will be handled by PCI Express AER driver.
13
14
15#
16# PCI Express ECRC
17#
18config PCIE_ECRC
19 bool "PCI Express ECRC settings control"
20 depends on PCIEAER
21 help
22	  Used to override firmware/BIOS settings for PCI Express ECRC
23 (transaction layer end-to-end CRC checking).
24
25 When in doubt, say N.
26
27source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000000..b8c925c1f6aa
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
1#
2# PCI Express Root Port Device AER Debug Configuration
3#
4
5config PCIEAER_INJECT
6 tristate "PCIE AER error injector support"
7 depends on PCIEAER
8 default n
9 help
10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) software error injector.
12
13	  Debugging PCIe AER code is quite difficult because it is hard
14	  to trigger various real hardware errors. Software-based
15	  error injection can fake almost all kinds of errors with the
16	  help of a user-space helper tool, aer-inject, which can be
17	  obtained from:
18 http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a8..2cba67510dc8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
4 4
5obj-$(CONFIG_PCIEAER) += aerdriver.o 5obj-$(CONFIG_PCIEAER) += aerdriver.o
6 6
7obj-$(CONFIG_PCIE_ECRC) += ecrc.o
8
7aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o 9aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
8aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o 10aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
9 11
12obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000000..d92ae21a59d8
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
1/*
2 * PCIE AER software error injection support.
3 *
4 * Debugging PCIe AER code is quite difficult because it is hard to
5 * trigger various real hardware errors. Software-based error
6 * injection can fake almost all kinds of errors with the help of a
7 * user-space helper tool, aer-inject, which can be obtained from:
8 * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
9 *
10 * Copyright 2009 Intel Corporation.
11 * Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; version 2
16 * of the License.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/miscdevice.h>
23#include <linux/pci.h>
24#include <linux/fs.h>
25#include <asm/uaccess.h>
26#include "aerdrv.h"
27
28struct aer_error_inj
29{
30 u8 bus;
31 u8 dev;
32 u8 fn;
33 u32 uncor_status;
34 u32 cor_status;
35 u32 header_log0;
36 u32 header_log1;
37 u32 header_log2;
38 u32 header_log3;
39};
40
41struct aer_error
42{
43 struct list_head list;
44 unsigned int bus;
45 unsigned int devfn;
46 int pos_cap_err;
47
48 u32 uncor_status;
49 u32 cor_status;
50 u32 header_log0;
51 u32 header_log1;
52 u32 header_log2;
53 u32 header_log3;
54 u32 root_status;
55 u32 source_id;
56};
57
58struct pci_bus_ops
59{
60 struct list_head list;
61 struct pci_bus *bus;
62 struct pci_ops *ops;
63};
64
65static LIST_HEAD(einjected);
66
67static LIST_HEAD(pci_bus_ops_list);
68
69/* Protect einjected and pci_bus_ops_list */
70static DEFINE_SPINLOCK(inject_lock);
71
72static void aer_error_init(struct aer_error *err, unsigned int bus,
73 unsigned int devfn, int pos_cap_err)
74{
75 INIT_LIST_HEAD(&err->list);
76 err->bus = bus;
77 err->devfn = devfn;
78 err->pos_cap_err = pos_cap_err;
79}
80
81/* inject_lock must be held before calling */
82static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
83{
84 struct aer_error *err;
85
86 list_for_each_entry(err, &einjected, list) {
87 if (bus == err->bus && devfn == err->devfn)
88 return err;
89 }
90 return NULL;
91}
92
93/* inject_lock must be held before calling */
94static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
95{
96 return __find_aer_error(dev->bus->number, dev->devfn);
97}
98
99/* inject_lock must be held before calling */
100static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
101{
102 struct pci_bus_ops *bus_ops;
103
104 list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
105 if (bus_ops->bus == bus)
106 return bus_ops->ops;
107 }
108 return NULL;
109}
110
111static struct pci_bus_ops *pci_bus_ops_pop(void)
112{
113 unsigned long flags;
114 struct pci_bus_ops *bus_ops = NULL;
115
116 spin_lock_irqsave(&inject_lock, flags);
117 if (list_empty(&pci_bus_ops_list))
118 bus_ops = NULL;
119 else {
120 struct list_head *lh = pci_bus_ops_list.next;
121 list_del(lh);
122 bus_ops = list_entry(lh, struct pci_bus_ops, list);
123 }
124 spin_unlock_irqrestore(&inject_lock, flags);
125 return bus_ops;
126}
127
128static u32 *find_pci_config_dword(struct aer_error *err, int where,
129 int *prw1cs)
130{
131 int rw1cs = 0;
132 u32 *target = NULL;
133
134 if (err->pos_cap_err == -1)
135 return NULL;
136
137 switch (where - err->pos_cap_err) {
138 case PCI_ERR_UNCOR_STATUS:
139 target = &err->uncor_status;
140 rw1cs = 1;
141 break;
142 case PCI_ERR_COR_STATUS:
143 target = &err->cor_status;
144 rw1cs = 1;
145 break;
146 case PCI_ERR_HEADER_LOG:
147 target = &err->header_log0;
148 break;
149 case PCI_ERR_HEADER_LOG+4:
150 target = &err->header_log1;
151 break;
152 case PCI_ERR_HEADER_LOG+8:
153 target = &err->header_log2;
154 break;
155 case PCI_ERR_HEADER_LOG+12:
156 target = &err->header_log3;
157 break;
158 case PCI_ERR_ROOT_STATUS:
159 target = &err->root_status;
160 rw1cs = 1;
161 break;
162 case PCI_ERR_ROOT_COR_SRC:
163 target = &err->source_id;
164 break;
165 }
166 if (prw1cs)
167 *prw1cs = rw1cs;
168 return target;
169}
170
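find_pci_config_dword() also tells the caller whether the register is RW1CS (write-1-to-clear, sticky), so pci_write_aer() below can emulate status-register semantics. Strict RW1C behavior is the sketch below; the write path uses XOR instead, which is equivalent as long as software only writes back bits it has read as set:

	/* Strict RW1C: writing 1 to a bit clears it, writing 0 leaves it. */
	static inline u32 example_rw1c_apply(u32 reg, u32 val)
	{
		return reg & ~val;
	}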
171static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
172 int size, u32 *val)
173{
174 u32 *sim;
175 struct aer_error *err;
176 unsigned long flags;
177 struct pci_ops *ops;
178
179 spin_lock_irqsave(&inject_lock, flags);
180 if (size != sizeof(u32))
181 goto out;
182 err = __find_aer_error(bus->number, devfn);
183 if (!err)
184 goto out;
185
186 sim = find_pci_config_dword(err, where, NULL);
187 if (sim) {
188 *val = *sim;
189 spin_unlock_irqrestore(&inject_lock, flags);
190 return 0;
191 }
192out:
193 ops = __find_pci_bus_ops(bus);
194 spin_unlock_irqrestore(&inject_lock, flags);
195 return ops->read(bus, devfn, where, size, val);
196}
197
198static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
199 u32 val)
200{
201 u32 *sim;
202 struct aer_error *err;
203 unsigned long flags;
204 int rw1cs;
205 struct pci_ops *ops;
206
207 spin_lock_irqsave(&inject_lock, flags);
208 if (size != sizeof(u32))
209 goto out;
210 err = __find_aer_error(bus->number, devfn);
211 if (!err)
212 goto out;
213
214 sim = find_pci_config_dword(err, where, &rw1cs);
215 if (sim) {
216 if (rw1cs)
217 *sim ^= val;
218 else
219 *sim = val;
220 spin_unlock_irqrestore(&inject_lock, flags);
221 return 0;
222 }
223out:
224 ops = __find_pci_bus_ops(bus);
225 spin_unlock_irqrestore(&inject_lock, flags);
226 return ops->write(bus, devfn, where, size, val);
227}
228
229static struct pci_ops pci_ops_aer = {
230 .read = pci_read_aer,
231 .write = pci_write_aer,
232};
233
234static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
235 struct pci_bus *bus,
236 struct pci_ops *ops)
237{
238 INIT_LIST_HEAD(&bus_ops->list);
239 bus_ops->bus = bus;
240 bus_ops->ops = ops;
241}
242
243static int pci_bus_set_aer_ops(struct pci_bus *bus)
244{
245 struct pci_ops *ops;
246 struct pci_bus_ops *bus_ops;
247 unsigned long flags;
248
249 bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
250 if (!bus_ops)
251 return -ENOMEM;
252 ops = pci_bus_set_ops(bus, &pci_ops_aer);
253 spin_lock_irqsave(&inject_lock, flags);
254 if (ops == &pci_ops_aer)
255 goto out;
256 pci_bus_ops_init(bus_ops, bus, ops);
257 list_add(&bus_ops->list, &pci_bus_ops_list);
258 bus_ops = NULL;
259out:
260 spin_unlock_irqrestore(&inject_lock, flags);
261 if (bus_ops)
262 kfree(bus_ops);
263 return 0;
264}
265
266static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
267{
268 while (1) {
269 if (!dev->is_pcie)
270 break;
271 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
272 return dev;
273 if (!dev->bus->self)
274 break;
275 dev = dev->bus->self;
276 }
277 return NULL;
278}
279
280static int find_aer_device_iter(struct device *device, void *data)
281{
282 struct pcie_device **result = data;
283 struct pcie_device *pcie_dev;
284
285 if (device->bus == &pcie_port_bus_type) {
286 pcie_dev = to_pcie_device(device);
287 if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
288 *result = pcie_dev;
289 return 1;
290 }
291 }
292 return 0;
293}
294
295static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
296{
297 return device_for_each_child(&dev->dev, result, find_aer_device_iter);
298}
299
300static int aer_inject(struct aer_error_inj *einj)
301{
302 struct aer_error *err, *rperr;
303 struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
304 struct pci_dev *dev, *rpdev;
305 struct pcie_device *edev;
306 unsigned long flags;
307 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
308 int pos_cap_err, rp_pos_cap_err;
309 u32 sever;
310 int ret = 0;
311
312 dev = pci_get_bus_and_slot(einj->bus, devfn);
313 if (!dev)
314 return -EINVAL;
315 rpdev = pcie_find_root_port(dev);
316 if (!rpdev) {
317 ret = -EINVAL;
318 goto out_put;
319 }
320
321 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
322 if (!pos_cap_err) {
323 ret = -EIO;
324 goto out_put;
325 }
326 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
327
328 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
329 if (!rp_pos_cap_err) {
330 ret = -EIO;
331 goto out_put;
332 }
333
334 err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
335 if (!err_alloc) {
336 ret = -ENOMEM;
337 goto out_put;
338 }
339 rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
340 if (!rperr_alloc) {
341 ret = -ENOMEM;
342 goto out_put;
343 }
344
345 spin_lock_irqsave(&inject_lock, flags);
346
347 err = __find_aer_error_by_dev(dev);
348 if (!err) {
349 err = err_alloc;
350 err_alloc = NULL;
351 aer_error_init(err, einj->bus, devfn, pos_cap_err);
352 list_add(&err->list, &einjected);
353 }
354 err->uncor_status |= einj->uncor_status;
355 err->cor_status |= einj->cor_status;
356 err->header_log0 = einj->header_log0;
357 err->header_log1 = einj->header_log1;
358 err->header_log2 = einj->header_log2;
359 err->header_log3 = einj->header_log3;
360
361 rperr = __find_aer_error_by_dev(rpdev);
362 if (!rperr) {
363 rperr = rperr_alloc;
364 rperr_alloc = NULL;
365 aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
366 rp_pos_cap_err);
367 list_add(&rperr->list, &einjected);
368 }
369 if (einj->cor_status) {
370 if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
371 rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
372 else
373 rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
374 rperr->source_id &= 0xffff0000;
375 rperr->source_id |= (einj->bus << 8) | devfn;
376 }
377 if (einj->uncor_status) {
378 if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
379 rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
380 if (sever & einj->uncor_status) {
381 rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
382 if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
383 rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
384 } else
385 rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
386 rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
387 rperr->source_id &= 0x0000ffff;
388 rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
389 }
390 spin_unlock_irqrestore(&inject_lock, flags);
391
392 ret = pci_bus_set_aer_ops(dev->bus);
393 if (ret)
394 goto out_put;
395 ret = pci_bus_set_aer_ops(rpdev->bus);
396 if (ret)
397 goto out_put;
398
399 if (find_aer_device(rpdev, &edev))
400 aer_irq(-1, edev);
401 else
402 ret = -EINVAL;
403out_put:
404 if (err_alloc)
405 kfree(err_alloc);
406 if (rperr_alloc)
407 kfree(rperr_alloc);
408 pci_dev_put(dev);
409 return ret;
410}
411
412static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
413 size_t usize, loff_t *off)
414{
415 struct aer_error_inj einj;
416 int ret;
417
418 if (!capable(CAP_SYS_ADMIN))
419 return -EPERM;
420
421 if (usize != sizeof(struct aer_error_inj))
422 return -EINVAL;
423
424 if (copy_from_user(&einj, ubuf, usize))
425 return -EFAULT;
426
427 ret = aer_inject(&einj);
428 return ret ? ret : usize;
429}
430
431static const struct file_operations aer_inject_fops = {
432 .write = aer_inject_write,
433 .owner = THIS_MODULE,
434};
435
436static struct miscdevice aer_inject_device = {
437 .minor = MISC_DYNAMIC_MINOR,
438 .name = "aer_inject",
439 .fops = &aer_inject_fops,
440};
441
442static int __init aer_inject_init(void)
443{
444 return misc_register(&aer_inject_device);
445}
446
447static void __exit aer_inject_exit(void)
448{
449 struct aer_error *err, *err_next;
450 unsigned long flags;
451 struct pci_bus_ops *bus_ops;
452
453 misc_deregister(&aer_inject_device);
454
455 while ((bus_ops = pci_bus_ops_pop())) {
456 pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
457 kfree(bus_ops);
458 }
459
460 spin_lock_irqsave(&inject_lock, flags);
461 list_for_each_entry_safe(err, err_next,
462 &einjected, list) {
463 list_del(&err->list);
464 kfree(err);
465 }
466 spin_unlock_irqrestore(&inject_lock, flags);
467}
468
469module_init(aer_inject_init);
470module_exit(aer_inject_exit);
471
472MODULE_DESCRIPTION("PCIe AER software error injector");
473MODULE_LICENSE("GPL");
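For context, the injector is driven from user space by writing a single binary record to the misc device (typically /dev/aer_inject) while holding CAP_SYS_ADMIN; the write must be exactly sizeof(struct aer_error_inj), otherwise it fails with -EINVAL. Below is a hedged sketch of such a client. The struct layout is an assumption that mirrors the einj-> accesses in aer_inject() above (the authoritative definition lives in aer_inject.c), and the target 00:03.0 is purely hypothetical.

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>

        /* Assumed userspace mirror of the kernel's struct aer_error_inj */
        struct aer_error_inj {
                uint8_t bus, dev, fn;           /* target bus/device/function */
                uint32_t uncor_status;          /* PCI_ERR_UNCOR_* bits to inject */
                uint32_t cor_status;            /* PCI_ERR_COR_* bits to inject */
                uint32_t header_log0, header_log1;
                uint32_t header_log2, header_log3;      /* simulated TLP header log */
        };

        int main(void)
        {
                struct aer_error_inj einj = {
                        .bus = 0x00, .dev = 0x03, .fn = 0x0,
                        .cor_status = 0x00000001,       /* one correctable error bit */
                };
                int fd = open("/dev/aer_inject", O_WRONLY);

                if (fd < 0) {
                        perror("open /dev/aer_inject");
                        return 1;
                }
                /* Must be exactly sizeof(einj); needs CAP_SYS_ADMIN */
                if (write(fd, &einj, sizeof(einj)) != sizeof(einj)) {
                        perror("write");
                        close(fd);
                        return 1;
                }
                close(fd);
                return 0;
        }

On success the kernel simulates the error registers, swaps in the AER bus ops as above, and calls aer_irq() directly, so the normal AER handling path runs without any real hardware fault.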
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5af927e..4770f13b3ca1 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@ void pci_no_aer(void)
77 * 77 *
78 * Invoked when Root Port detects AER messages. 78 * Invoked when Root Port detects AER messages.
79 **/ 79 **/
80static irqreturn_t aer_irq(int irq, void *context) 80irqreturn_t aer_irq(int irq, void *context)
81{ 81{
82 unsigned int status, id; 82 unsigned int status, id;
83 struct pcie_device *pdev = (struct pcie_device *)context; 83 struct pcie_device *pdev = (struct pcie_device *)context;
@@ -126,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context)
126 126
127 return IRQ_HANDLED; 127 return IRQ_HANDLED;
128} 128}
129EXPORT_SYMBOL_GPL(aer_irq);
129 130
130/** 131/**
131 * aer_alloc_rpc - allocate Root Port data structure 132 * aer_alloc_rpc - allocate Root Port data structure
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482a4779..bbd7428ca2d0 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/pcieport_if.h> 12#include <linux/pcieport_if.h>
13#include <linux/aer.h> 13#include <linux/aer.h>
14#include <linux/interrupt.h>
14 15
15#define AER_NONFATAL 0 16#define AER_NONFATAL 0
16#define AER_FATAL 1 17#define AER_FATAL 1
@@ -56,7 +57,11 @@ struct header_log_regs {
56 unsigned int dw3; 57 unsigned int dw3;
57}; 58};
58 59
60#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
59struct aer_err_info { 61struct aer_err_info {
62 struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
63 int error_dev_num;
64 u16 id;
60 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ 65 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
61 int flags; 66 int flags;
62 unsigned int status; /* COR/UNCOR Error Status */ 67 unsigned int status; /* COR/UNCOR Error Status */
@@ -120,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
120extern int aer_init(struct pcie_device *dev); 125extern int aer_init(struct pcie_device *dev);
121extern void aer_isr(struct work_struct *work); 126extern void aer_isr(struct work_struct *work);
122extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 127extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
128extern irqreturn_t aer_irq(int irq, void *context);
123 129
124#ifdef CONFIG_ACPI 130#ifdef CONFIG_ACPI
125extern int aer_osc_setup(struct pcie_device *pciedev); 131extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f30035..3d8872704a58 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
26#include "aerdrv.h" 26#include "aerdrv.h"
27 27
28static int forceload; 28static int forceload;
29static int nosourceid;
29module_param(forceload, bool, 0); 30module_param(forceload, bool, 0);
31module_param(nosourceid, bool, 0);
30 32
31int pci_enable_pcie_error_reporting(struct pci_dev *dev) 33int pci_enable_pcie_error_reporting(struct pci_dev *dev)
32{ 34{
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
109#endif /* 0 */ 111#endif /* 0 */
110 112
111 113
112static void set_device_error_reporting(struct pci_dev *dev, void *data) 114static int set_device_error_reporting(struct pci_dev *dev, void *data)
113{ 115{
114 bool enable = *((bool *)data); 116 bool enable = *((bool *)data);
115 117
116 if (dev->pcie_type != PCIE_RC_PORT && 118 if (dev->pcie_type == PCIE_RC_PORT ||
117 dev->pcie_type != PCIE_SW_UPSTREAM_PORT && 119 dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
118 dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT) 120 dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
119 return; 121 if (enable)
122 pci_enable_pcie_error_reporting(dev);
123 else
124 pci_disable_pcie_error_reporting(dev);
125 }
120 126
121 if (enable) 127 if (enable)
122 pci_enable_pcie_error_reporting(dev); 128 pcie_set_ecrc_checking(dev);
123 else 129
124 pci_disable_pcie_error_reporting(dev); 130 return 0;
125} 131}
126 132
127/** 133/**
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
139 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); 145 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
140} 146}
141 147
142static int find_device_iter(struct device *device, void *data) 148static inline int compare_device_id(struct pci_dev *dev,
149 struct aer_err_info *e_info)
143{ 150{
144 struct pci_dev *dev; 151 if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
145 u16 id = *(unsigned long *)data; 152 /*
146 u8 secondary, subordinate, d_bus = id >> 8; 153 * Device ID match
154 */
155 return 1;
156 }
147 157
148 if (device->bus == &pci_bus_type) { 158 return 0;
149 dev = to_pci_dev(device); 159}
150 if (id == ((dev->bus->number << 8) | dev->devfn)) { 160
151 /* 161static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
152 * Device ID match 162{
153 */ 163 if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
154 *(unsigned long*)data = (unsigned long)device; 164 e_info->dev[e_info->error_dev_num] = dev;
155 return 1; 165 e_info->error_dev_num++;
156 } 166 return 1;
167 } else
168 return 0;
169}
170
171
172#define PCI_BUS(x) (((x) >> 8) & 0xff)
173
174static int find_device_iter(struct pci_dev *dev, void *data)
175{
176 int pos;
177 u32 status;
178 u32 mask;
179 u16 reg16;
180 int result;
181 struct aer_err_info *e_info = (struct aer_err_info *)data;
182
183 /*
 184 * When the bus id is 0, it might be a bad id
 185 * reported by the root port.
186 */
187 if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
188 result = compare_device_id(dev, e_info);
189 if (result)
190 add_error_device(e_info, dev);
157 191
158 /* 192 /*
 159 * If device is P2P, check if it is an upstream? 193 * If there are no multiple errors, we stop
 194 * or continue based on the id comparison.
160 */ 195 */
161 if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { 196 if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
162 pci_read_config_byte(dev, PCI_SECONDARY_BUS, 197 return result;
163 &secondary); 198
164 pci_read_config_byte(dev, PCI_SUBORDINATE_BUS, 199 /*
 165 &subordinate); 200 * If there are multiple errors and the id matches,
 166 if (d_bus >= secondary && d_bus <= subordinate) { 201 * we need to continue searching other devices under
 167 *(unsigned long*)data = (unsigned long)device; 202 * the root port. Returning 0 does that.
168 return 1; 203 */
169 } 204 if (result)
205 return 0;
206 }
207
208 /*
 209 * When any of the following holds:
 210 * 1) nosourceid==y;
 211 * 2) the bus id is 0 (some ports might lose the bus
 212 * id of the error source);
 213 * 3) there are multiple errors and the prior id comparison failed;
 214 * we check the AER status registers to find the initial reporter.
215 */
216 if (atomic_read(&dev->enable_cnt) == 0)
217 return 0;
218 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
219 if (!pos)
220 return 0;
221 /* Check if AER is enabled */
222 pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
223 if (!(reg16 & (
224 PCI_EXP_DEVCTL_CERE |
225 PCI_EXP_DEVCTL_NFERE |
226 PCI_EXP_DEVCTL_FERE |
227 PCI_EXP_DEVCTL_URRE)))
228 return 0;
229 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
230 if (!pos)
231 return 0;
232
233 status = 0;
234 mask = 0;
235 if (e_info->severity == AER_CORRECTABLE) {
236 pci_read_config_dword(dev,
237 pos + PCI_ERR_COR_STATUS,
238 &status);
239 pci_read_config_dword(dev,
240 pos + PCI_ERR_COR_MASK,
241 &mask);
242 if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
243 add_error_device(e_info, dev);
244 goto added;
245 }
246 } else {
247 pci_read_config_dword(dev,
248 pos + PCI_ERR_UNCOR_STATUS,
249 &status);
250 pci_read_config_dword(dev,
251 pos + PCI_ERR_UNCOR_MASK,
252 &mask);
253 if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
254 add_error_device(e_info, dev);
255 goto added;
170 } 256 }
171 } 257 }
172 258
173 return 0; 259 return 0;
260
261added:
262 if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
263 return 0;
264 else
265 return 1;
174} 266}
175 267
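A worked example of the id matching above: if the root port latches source id 0x0310, then PCI_BUS(0x0310) is 0x03 and the devfn byte is 0x10, i.e. device 2, function 0, so compare_device_id() matches the function at 03:02.0. When the bus byte of the id is 0, or nosourceid is set, or the id comparison fails under a multi-error condition, the AER status-register scan in the second half of find_device_iter() identifies the reporter instead.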
176/** 268/**
177 * find_source_device - search through device hierarchy for source device 269 * find_source_device - search through device hierarchy for source device
178 * @parent: pointer to Root Port pci_dev data structure 270 * @parent: pointer to Root Port pci_dev data structure
 179 * @id: device ID of agent who sends an error message to this Root Port 271 * @e_info: detailed error information, such as the agent id
180 * 272 *
181 * Invoked when error is detected at the Root Port. 273 * Invoked when error is detected at the Root Port.
182 */ 274 */
183static struct device* find_source_device(struct pci_dev *parent, u16 id) 275static void find_source_device(struct pci_dev *parent,
276 struct aer_err_info *e_info)
184{ 277{
185 struct pci_dev *dev = parent; 278 struct pci_dev *dev = parent;
186 struct device *device; 279 int result;
187 unsigned long device_addr;
188 int status;
189 280
190 /* Is Root Port an agent that sends error message? */ 281 /* Is Root Port an agent that sends error message? */
191 if (id == ((dev->bus->number << 8) | dev->devfn)) 282 result = find_device_iter(dev, e_info);
192 return &dev->dev; 283 if (result)
193 284 return;
194 do {
195 device_addr = id;
196 if ((status = device_for_each_child(&dev->dev,
197 &device_addr, find_device_iter))) {
198 device = (struct device*)device_addr;
199 dev = to_pci_dev(device);
200 if (id == ((dev->bus->number << 8) | dev->devfn))
201 return device;
202 }
203 }while (status);
204 285
205 return NULL; 286 pci_walk_bus(parent->subordinate, find_device_iter, e_info);
206} 287}
207 288
208static void report_error_detected(struct pci_dev *dev, void *data) 289static int report_error_detected(struct pci_dev *dev, void *data)
209{ 290{
210 pci_ers_result_t vote; 291 pci_ers_result_t vote;
211 struct pci_error_handlers *err_handler; 292 struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data)
230 dev->driver ? 311 dev->driver ?
231 "no AER-aware driver" : "no driver"); 312 "no AER-aware driver" : "no driver");
232 } 313 }
233 return; 314 return 0;
234 } 315 }
235 316
236 err_handler = dev->driver->err_handler; 317 err_handler = dev->driver->err_handler;
237 vote = err_handler->error_detected(dev, result_data->state); 318 vote = err_handler->error_detected(dev, result_data->state);
238 result_data->result = merge_result(result_data->result, vote); 319 result_data->result = merge_result(result_data->result, vote);
239 return; 320 return 0;
240} 321}
241 322
242static void report_mmio_enabled(struct pci_dev *dev, void *data) 323static int report_mmio_enabled(struct pci_dev *dev, void *data)
243{ 324{
244 pci_ers_result_t vote; 325 pci_ers_result_t vote;
245 struct pci_error_handlers *err_handler; 326 struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data)
249 if (!dev->driver || 330 if (!dev->driver ||
250 !dev->driver->err_handler || 331 !dev->driver->err_handler ||
251 !dev->driver->err_handler->mmio_enabled) 332 !dev->driver->err_handler->mmio_enabled)
252 return; 333 return 0;
253 334
254 err_handler = dev->driver->err_handler; 335 err_handler = dev->driver->err_handler;
255 vote = err_handler->mmio_enabled(dev); 336 vote = err_handler->mmio_enabled(dev);
256 result_data->result = merge_result(result_data->result, vote); 337 result_data->result = merge_result(result_data->result, vote);
257 return; 338 return 0;
258} 339}
259 340
260static void report_slot_reset(struct pci_dev *dev, void *data) 341static int report_slot_reset(struct pci_dev *dev, void *data)
261{ 342{
262 pci_ers_result_t vote; 343 pci_ers_result_t vote;
263 struct pci_error_handlers *err_handler; 344 struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data)
267 if (!dev->driver || 348 if (!dev->driver ||
268 !dev->driver->err_handler || 349 !dev->driver->err_handler ||
269 !dev->driver->err_handler->slot_reset) 350 !dev->driver->err_handler->slot_reset)
270 return; 351 return 0;
271 352
272 err_handler = dev->driver->err_handler; 353 err_handler = dev->driver->err_handler;
273 vote = err_handler->slot_reset(dev); 354 vote = err_handler->slot_reset(dev);
274 result_data->result = merge_result(result_data->result, vote); 355 result_data->result = merge_result(result_data->result, vote);
275 return; 356 return 0;
276} 357}
277 358
278static void report_resume(struct pci_dev *dev, void *data) 359static int report_resume(struct pci_dev *dev, void *data)
279{ 360{
280 struct pci_error_handlers *err_handler; 361 struct pci_error_handlers *err_handler;
281 362
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data)
284 if (!dev->driver || 365 if (!dev->driver ||
285 !dev->driver->err_handler || 366 !dev->driver->err_handler ||
286 !dev->driver->err_handler->resume) 367 !dev->driver->err_handler->resume)
287 return; 368 return 0;
288 369
289 err_handler = dev->driver->err_handler; 370 err_handler = dev->driver->err_handler;
290 err_handler->resume(dev); 371 err_handler->resume(dev);
291 return; 372 return 0;
292} 373}
293 374
294/** 375/**
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data)
305static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, 386static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
306 enum pci_channel_state state, 387 enum pci_channel_state state,
307 char *error_mesg, 388 char *error_mesg,
308 void (*cb)(struct pci_dev *, void *)) 389 int (*cb)(struct pci_dev *, void *))
309{ 390{
310 struct aer_broadcast_data result_data; 391 struct aer_broadcast_data result_data;
311 392
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
497 */ 578 */
498static void handle_error_source(struct pcie_device * aerdev, 579static void handle_error_source(struct pcie_device * aerdev,
499 struct pci_dev *dev, 580 struct pci_dev *dev,
500 struct aer_err_info info) 581 struct aer_err_info *info)
501{ 582{
502 pci_ers_result_t status = 0; 583 pci_ers_result_t status = 0;
503 int pos; 584 int pos;
504 585
505 if (info.severity == AER_CORRECTABLE) { 586 if (info->severity == AER_CORRECTABLE) {
506 /* 587 /*
 507 * Correctable error does not need software intervention. 588 * Correctable error does not need software intervention.
508 * No need to go through error recovery process. 589 * No need to go through error recovery process.
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev,
510 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 591 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
511 if (pos) 592 if (pos)
512 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 593 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
513 info.status); 594 info->status);
514 } else { 595 } else {
515 status = do_recovery(aerdev, dev, info.severity); 596 status = do_recovery(aerdev, dev, info->severity);
516 if (status == PCI_ERS_RESULT_RECOVERED) { 597 if (status == PCI_ERS_RESULT_RECOVERED) {
517 dev_printk(KERN_DEBUG, &dev->dev, "AER driver " 598 dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
518 "successfully recovered\n"); 599 "successfully recovered\n");
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
661 return AER_SUCCESS; 742 return AER_SUCCESS;
662} 743}
663 744
745static inline void aer_process_err_devices(struct pcie_device *p_device,
746 struct aer_err_info *e_info)
747{
748 int i;
749
750 if (!e_info->dev[0]) {
751 dev_printk(KERN_DEBUG, &p_device->port->dev,
752 "can't find device of ID%04x\n",
753 e_info->id);
754 }
755
756 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
757 if (get_device_error_info(e_info->dev[i], e_info) ==
758 AER_SUCCESS) {
759 aer_print_error(e_info->dev[i], e_info);
760 handle_error_source(p_device,
761 e_info->dev[i],
762 e_info);
763 }
764 }
765}
766
664/** 767/**
665 * aer_isr_one_error - consume an error detected by root port 768 * aer_isr_one_error - consume an error detected by root port
666 * @p_device: pointer to error root port service device 769 * @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
669static void aer_isr_one_error(struct pcie_device *p_device, 772static void aer_isr_one_error(struct pcie_device *p_device,
670 struct aer_err_source *e_src) 773 struct aer_err_source *e_src)
671{ 774{
672 struct device *s_device; 775 struct aer_err_info *e_info;
673 struct aer_err_info e_info = {0, 0, 0,};
674 int i; 776 int i;
675 u16 id; 777
778 /* struct aer_err_info might be big, so we allocate it with slab */
779 e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
780 if (e_info == NULL) {
781 dev_printk(KERN_DEBUG, &p_device->port->dev,
782 "Can't allocate mem when processing AER errors\n");
783 return;
784 }
676 785
677 /* 786 /*
678 * There is a possibility that both correctable error and 787 * There is a possibility that both correctable error and
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device,
684 if (!(e_src->status & i)) 793 if (!(e_src->status & i))
685 continue; 794 continue;
686 795
796 memset(e_info, 0, sizeof(struct aer_err_info));
797
687 /* Init comprehensive error information */ 798 /* Init comprehensive error information */
688 if (i & PCI_ERR_ROOT_COR_RCV) { 799 if (i & PCI_ERR_ROOT_COR_RCV) {
689 id = ERR_COR_ID(e_src->id); 800 e_info->id = ERR_COR_ID(e_src->id);
690 e_info.severity = AER_CORRECTABLE; 801 e_info->severity = AER_CORRECTABLE;
691 } else { 802 } else {
692 id = ERR_UNCOR_ID(e_src->id); 803 e_info->id = ERR_UNCOR_ID(e_src->id);
693 e_info.severity = ((e_src->status >> 6) & 1); 804 e_info->severity = ((e_src->status >> 6) & 1);
694 } 805 }
695 if (e_src->status & 806 if (e_src->status &
696 (PCI_ERR_ROOT_MULTI_COR_RCV | 807 (PCI_ERR_ROOT_MULTI_COR_RCV |
697 PCI_ERR_ROOT_MULTI_UNCOR_RCV)) 808 PCI_ERR_ROOT_MULTI_UNCOR_RCV))
698 e_info.flags |= AER_MULTI_ERROR_VALID_FLAG; 809 e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
699 if (!(s_device = find_source_device(p_device->port, id))) { 810
700 printk(KERN_DEBUG "%s->can't find device of ID%04x\n", 811 find_source_device(p_device->port, e_info);
701 __func__, id); 812 aer_process_err_devices(p_device, e_info);
702 continue;
703 }
704 if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
705 AER_SUCCESS) {
706 aer_print_error(to_pci_dev(s_device), &e_info);
707 handle_error_source(p_device,
708 to_pci_dev(s_device),
709 e_info);
710 }
711 } 813 }
814
815 kfree(e_info);
712} 816}
713 817
714/** 818/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000000..ece97df4df6d
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
1/*
2 * Enables/disables PCIe ECRC checking.
3 *
4 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
5 * Andrew Patterson <andrew.patterson@hp.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307, USA.
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/pci.h>
27#include <linux/pci_regs.h>
28#include <linux/errno.h>
29#include "../../pci.h"
30
31#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
32#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
33#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
34
35static int ecrc_policy = ECRC_POLICY_DEFAULT;
36
37static const char *ecrc_policy_str[] = {
38 [ECRC_POLICY_DEFAULT] = "bios",
39 [ECRC_POLICY_OFF] = "off",
40 [ECRC_POLICY_ON] = "on"
41};
42
43/**
 44 * enable_ecrc_checking - enable PCIe ECRC checking for a device
45 * @dev: the PCI device
46 *
47 * Returns 0 on success, or negative on failure.
48 */
49static int enable_ecrc_checking(struct pci_dev *dev)
50{
51 int pos;
52 u32 reg32;
53
54 if (!dev->is_pcie)
55 return -ENODEV;
56
57 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
58 if (!pos)
59 return -ENODEV;
60
61 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
62 if (reg32 & PCI_ERR_CAP_ECRC_GENC)
63 reg32 |= PCI_ERR_CAP_ECRC_GENE;
64 if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
65 reg32 |= PCI_ERR_CAP_ECRC_CHKE;
66 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
67
68 return 0;
69}
70
71/**
 72 * disable_ecrc_checking - disable PCIe ECRC checking for a device
73 * @dev: the PCI device
74 *
75 * Returns 0 on success, or negative on failure.
76 */
77static int disable_ecrc_checking(struct pci_dev *dev)
78{
79 int pos;
80 u32 reg32;
81
82 if (!dev->is_pcie)
83 return -ENODEV;
84
85 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
86 if (!pos)
87 return -ENODEV;
88
89 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
90 reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
91 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
92
93 return 0;
94}
95
96/**
97 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
98 * @dev: the PCI device
99 */
100void pcie_set_ecrc_checking(struct pci_dev *dev)
101{
102 switch (ecrc_policy) {
103 case ECRC_POLICY_DEFAULT:
104 return;
105 case ECRC_POLICY_OFF:
106 disable_ecrc_checking(dev);
107 break;
108 case ECRC_POLICY_ON:
 109 enable_ecrc_checking(dev);
110 break;
111 default:
112 return;
113 }
114}
115
116/**
117 * pcie_ecrc_get_policy - parse kernel command-line ecrc option
118 */
119void pcie_ecrc_get_policy(char *str)
120{
121 int i;
122
123 for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
124 if (!strncmp(str, ecrc_policy_str[i],
125 strlen(ecrc_policy_str[i])))
126 break;
127 if (i >= ARRAY_SIZE(ecrc_policy_str))
128 return;
129
130 ecrc_policy = i;
131}
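pcie_ecrc_get_policy() is meant to be handed the value of a boot-time option parsed elsewhere; the hookup is outside this hunk. A hypothetical call-site sketch, assuming the option is spelled pci=ecrc=:

        /* For a command line containing "pci=ecrc=on", the pci= option
         * parser would pass along the substring after "ecrc=": */
        pcie_ecrc_get_policy("on");     /* sets ecrc_policy = ECRC_POLICY_ON */

pcie_set_ecrc_checking() then applies the chosen policy per device; with ECRC_POLICY_DEFAULT it leaves whatever the BIOS programmed untouched.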
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af4..3d27c97e0486 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
26#endif 26#endif
27#define MODULE_PARAM_PREFIX "pcie_aspm." 27#define MODULE_PARAM_PREFIX "pcie_aspm."
28 28
29struct endpoint_state { 29struct aspm_latency {
30 unsigned int l0s_acceptable_latency; 30 u32 l0s; /* L0s latency (nsec) */
31 unsigned int l1_acceptable_latency; 31 u32 l1; /* L1 latency (nsec) */
32}; 32};
33 33
34struct pcie_link_state { 34struct pcie_link_state {
35 struct list_head sibiling; 35 struct pci_dev *pdev; /* Upstream component of the Link */
36 struct pci_dev *pdev; 36 struct pcie_link_state *root; /* pointer to the root port link */
37 bool downstream_has_switch; 37 struct pcie_link_state *parent; /* pointer to the parent Link state */
38 38 struct list_head sibling; /* node in link_list */
39 struct pcie_link_state *parent; 39 struct list_head children; /* list of child link states */
40 struct list_head children; 40 struct list_head link; /* node in parent's children list */
41 struct list_head link;
42 41
43 /* ASPM state */ 42 /* ASPM state */
44 unsigned int support_state; 43 u32 aspm_support:2; /* Supported ASPM state */
45 unsigned int enabled_state; 44 u32 aspm_enabled:2; /* Enabled ASPM state */
46 unsigned int bios_aspm_state; 45 u32 aspm_default:2; /* Default ASPM state by BIOS */
47 /* upstream component */ 46
48 unsigned int l0s_upper_latency; 47 /* Clock PM state */
49 unsigned int l1_upper_latency; 48 u32 clkpm_capable:1; /* Clock PM capable? */
50 /* downstream component */ 49 u32 clkpm_enabled:1; /* Current Clock PM state */
51 unsigned int l0s_down_latency; 50 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
52 unsigned int l1_down_latency;
53 /* Clock PM state*/
54 unsigned int clk_pm_capable;
55 unsigned int clk_pm_enabled;
56 unsigned int bios_clk_state;
57 51
52 /* Latencies */
53 struct aspm_latency latency; /* Exit latency */
58 /* 54 /*
59 * A pcie downstream port only has one slot under it, so at most there 55 * Endpoint acceptable latencies. A pcie downstream port only
60 * are 8 functions 56 * has one slot under it, so at most there are 8 functions.
61 */ 57 */
62 struct endpoint_state endpoints[8]; 58 struct aspm_latency acceptable[8];
63}; 59};
64 60
65static int aspm_disabled, aspm_force; 61static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@ static const char *policy_str[] = {
78 74
79#define LINK_RETRAIN_TIMEOUT HZ 75#define LINK_RETRAIN_TIMEOUT HZ
80 76
81static int policy_to_aspm_state(struct pci_dev *pdev) 77static int policy_to_aspm_state(struct pcie_link_state *link)
82{ 78{
83 struct pcie_link_state *link_state = pdev->link_state;
84
85 switch (aspm_policy) { 79 switch (aspm_policy) {
86 case POLICY_PERFORMANCE: 80 case POLICY_PERFORMANCE:
87 /* Disable ASPM and Clock PM */ 81 /* Disable ASPM and Clock PM */
88 return 0; 82 return 0;
89 case POLICY_POWERSAVE: 83 case POLICY_POWERSAVE:
90 /* Enable ASPM L0s/L1 */ 84 /* Enable ASPM L0s/L1 */
91 return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 85 return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
92 case POLICY_DEFAULT: 86 case POLICY_DEFAULT:
93 return link_state->bios_aspm_state; 87 return link->aspm_default;
94 } 88 }
95 return 0; 89 return 0;
96} 90}
97 91
98static int policy_to_clkpm_state(struct pci_dev *pdev) 92static int policy_to_clkpm_state(struct pcie_link_state *link)
99{ 93{
100 struct pcie_link_state *link_state = pdev->link_state;
101
102 switch (aspm_policy) { 94 switch (aspm_policy) {
103 case POLICY_PERFORMANCE: 95 case POLICY_PERFORMANCE:
104 /* Disable ASPM and Clock PM */ 96 /* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev)
107 /* Disable Clock PM */ 99 /* Disable Clock PM */
108 return 1; 100 return 1;
109 case POLICY_DEFAULT: 101 case POLICY_DEFAULT:
110 return link_state->bios_clk_state; 102 return link->clkpm_default;
111 } 103 }
112 return 0; 104 return 0;
113} 105}
114 106
115static void pcie_set_clock_pm(struct pci_dev *pdev, int enable) 107static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
116{ 108{
117 struct pci_dev *child_dev;
118 int pos; 109 int pos;
119 u16 reg16; 110 u16 reg16;
120 struct pcie_link_state *link_state = pdev->link_state; 111 struct pci_dev *child;
112 struct pci_bus *linkbus = link->pdev->subordinate;
121 113
122 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 114 list_for_each_entry(child, &linkbus->devices, bus_list) {
123 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 115 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
124 if (!pos) 116 if (!pos)
125 return; 117 return;
126 pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16); 118 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
127 if (enable) 119 if (enable)
128 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; 120 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
129 else 121 else
130 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; 122 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
131 pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16); 123 pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
132 } 124 }
133 link_state->clk_pm_enabled = !!enable; 125 link->clkpm_enabled = !!enable;
134} 126}
135 127
136static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) 128static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
137{ 129{
138 int pos; 130 /* Don't enable Clock PM if the link is not Clock PM capable */
131 if (!link->clkpm_capable && enable)
132 return;
 133 /* Nothing to do if the requested state equals the current one */
134 if (link->clkpm_enabled == enable)
135 return;
136 pcie_set_clkpm_nocheck(link, enable);
137}
138
139static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
140{
141 int pos, capable = 1, enabled = 1;
139 u32 reg32; 142 u32 reg32;
140 u16 reg16; 143 u16 reg16;
141 int capable = 1, enabled = 1; 144 struct pci_dev *child;
142 struct pci_dev *child_dev; 145 struct pci_bus *linkbus = link->pdev->subordinate;
143 struct pcie_link_state *link_state = pdev->link_state;
144 146
145 /* All functions should have the same cap and state, take the worst */ 147 /* All functions should have the same cap and state, take the worst */
146 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 148 list_for_each_entry(child, &linkbus->devices, bus_list) {
147 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 149 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
148 if (!pos) 150 if (!pos)
149 return; 151 return;
150 pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32); 152 pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
151 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { 153 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
152 capable = 0; 154 capable = 0;
153 enabled = 0; 155 enabled = 0;
154 break; 156 break;
155 } 157 }
156 pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16); 158 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
157 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) 159 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
158 enabled = 0; 160 enabled = 0;
159 } 161 }
160 link_state->clk_pm_enabled = enabled; 162 link->clkpm_enabled = enabled;
161 link_state->bios_clk_state = enabled; 163 link->clkpm_default = enabled;
162 if (!blacklist) { 164 link->clkpm_capable = (blacklist) ? 0 : capable;
163 link_state->clk_pm_capable = capable;
164 pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
165 } else {
166 link_state->clk_pm_capable = 0;
167 pcie_set_clock_pm(pdev, 0);
168 }
169} 165}
170 166
171static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) 167static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
172{ 168{
173 struct pci_dev *child_dev; 169 struct pci_dev *child;
170 struct pci_bus *linkbus = link->pdev->subordinate;
174 171
175 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 172 list_for_each_entry(child, &linkbus->devices, bus_list) {
176 if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) 173 if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
177 return true; 174 return true;
178 } 175 }
179 return false; 176 return false;
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
184 * could use common clock. If they are, configure them to use the 181 * could use common clock. If they are, configure them to use the
185 * common clock. That will reduce the ASPM state exit latency. 182 * common clock. That will reduce the ASPM state exit latency.
186 */ 183 */
187static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) 184static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
188{ 185{
189 int pos, child_pos, i = 0; 186 int ppos, cpos, same_clock = 1;
190 u16 reg16 = 0; 187 u16 reg16, parent_reg, child_reg[8];
191 struct pci_dev *child_dev;
192 int same_clock = 1;
193 unsigned long start_jiffies; 188 unsigned long start_jiffies;
194 u16 child_regs[8], parent_reg; 189 struct pci_dev *child, *parent = link->pdev;
190 struct pci_bus *linkbus = parent->subordinate;
195 /* 191 /*
196 * all functions of a slot should have the same Slot Clock 192 * All functions of a slot should have the same Slot Clock
197 * Configuration, so just check one function 193 * Configuration, so just check one function
198 * */ 194 */
199 child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, 195 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
200 bus_list); 196 BUG_ON(!child->is_pcie);
201 BUG_ON(!child_dev->is_pcie);
202 197
 203 /* Check if the downstream component's Slot Clock Configuration bit is 1 */ 198 /* Check if the downstream component's Slot Clock Configuration bit is 1 */
204 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 199 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
205 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16); 200 pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
206 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 201 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
207 same_clock = 0; 202 same_clock = 0;
208 203
 209 /* Check if the upstream component's Slot Clock Configuration bit is 1 */ 204 /* Check if the upstream component's Slot Clock Configuration bit is 1 */
210 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 205 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
211 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 206 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
212 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 207 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
213 same_clock = 0; 208 same_clock = 0;
214 209
215 /* Configure downstream component, all functions */ 210 /* Configure downstream component, all functions */
216 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 211 list_for_each_entry(child, &linkbus->devices, bus_list) {
217 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 212 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
218 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 213 pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
219 &reg16); 214 child_reg[PCI_FUNC(child->devfn)] = reg16;
220 child_regs[i] = reg16;
221 if (same_clock) 215 if (same_clock)
222 reg16 |= PCI_EXP_LNKCTL_CCC; 216 reg16 |= PCI_EXP_LNKCTL_CCC;
223 else 217 else
224 reg16 &= ~PCI_EXP_LNKCTL_CCC; 218 reg16 &= ~PCI_EXP_LNKCTL_CCC;
225 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 219 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
226 reg16);
227 i++;
228 } 220 }
229 221
230 /* Configure upstream component */ 222 /* Configure upstream component */
231 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 223 pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
232 parent_reg = reg16; 224 parent_reg = reg16;
233 if (same_clock) 225 if (same_clock)
234 reg16 |= PCI_EXP_LNKCTL_CCC; 226 reg16 |= PCI_EXP_LNKCTL_CCC;
235 else 227 else
236 reg16 &= ~PCI_EXP_LNKCTL_CCC; 228 reg16 &= ~PCI_EXP_LNKCTL_CCC;
237 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 229 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
238 230
239 /* retrain link */ 231 /* Retrain link */
240 reg16 |= PCI_EXP_LNKCTL_RL; 232 reg16 |= PCI_EXP_LNKCTL_RL;
241 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 233 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
242 234
243 /* Wait for link training end */ 235 /* Wait for link training end. Break out after waiting for timeout */
244 /* break out after waiting for timeout */
245 start_jiffies = jiffies; 236 start_jiffies = jiffies;
246 for (;;) { 237 for (;;) {
247 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 238 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
248 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 239 if (!(reg16 & PCI_EXP_LNKSTA_LT))
249 break; 240 break;
250 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) 241 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
251 break; 242 break;
252 msleep(1); 243 msleep(1);
253 } 244 }
254 /* training failed -> recover */ 245 if (!(reg16 & PCI_EXP_LNKSTA_LT))
255 if (reg16 & PCI_EXP_LNKSTA_LT) { 246 return;
256 dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" 247
257 " common clock\n"); 248 /* Training failed. Restore common clock configurations */
258 i = 0; 249 dev_printk(KERN_ERR, &parent->dev,
259 list_for_each_entry(child_dev, &pdev->subordinate->devices, 250 "ASPM: Could not configure common clock\n");
260 bus_list) { 251 list_for_each_entry(child, &linkbus->devices, bus_list) {
261 child_pos = pci_find_capability(child_dev, 252 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
262 PCI_CAP_ID_EXP); 253 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
263 pci_write_config_word(child_dev, 254 child_reg[PCI_FUNC(child->devfn)]);
264 child_pos + PCI_EXP_LNKCTL,
265 child_regs[i]);
266 i++;
267 }
268 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
269 } 255 }
256 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
270} 257}
271 258
272/* 259/* Convert L0s latency encoding to ns */
273 * calc_L0S_latency: Convert L0s latency encoding to ns 260static u32 calc_l0s_latency(u32 encoding)
274 */
275static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
276{ 261{
277 unsigned int ns = 64; 262 if (encoding == 0x7)
263 return (5 * 1000); /* > 4us */
264 return (64 << encoding);
265}
278 266
279 if (latency_encoding == 0x7) { 267/* Convert L0s acceptable latency encoding to ns */
280 if (ac) 268static u32 calc_l0s_acceptable(u32 encoding)
281 ns = -1U; 269{
282 else 270 if (encoding == 0x7)
283 ns = 5*1000; /* > 4us */ 271 return -1U;
284 } else 272 return (64 << encoding);
285 ns *= (1 << latency_encoding);
286 return ns;
287} 273}
288 274
289/* 275/* Convert L1 latency encoding to ns */
290 * calc_L1_latency: Convert L1 latency encoding to ns 276static u32 calc_l1_latency(u32 encoding)
291 */
292static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
293{ 277{
294 unsigned int ns = 1000; 278 if (encoding == 0x7)
279 return (65 * 1000); /* > 64us */
280 return (1000 << encoding);
281}
295 282
296 if (latency_encoding == 0x7) { 283/* Convert L1 acceptable latency encoding to ns */
297 if (ac) 284static u32 calc_l1_acceptable(u32 encoding)
298 ns = -1U; 285{
299 else 286 if (encoding == 0x7)
300 ns = 65*1000; /* > 64us */ 287 return -1U;
301 } else 288 return (1000 << encoding);
302 ns *= (1 << latency_encoding);
303 return ns;
304} 289}
305 290
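Spot-checking the conversions above (an illustrative sketch, assuming <assert.h>; these helpers are static to aspm.c, so this is not meant to build against the kernel): the 3-bit latency fields scale a base of 64 ns for L0s and 1000 ns for L1, with encoding 0x7 reserved for "beyond the largest encodable value".

        assert(calc_l0s_latency(0x2) == 256);           /* 64 << 2 ns */
        assert(calc_l1_latency(0x4) == 16000);          /* 1000 << 4 ns = 16 us */
        assert(calc_l0s_latency(0x7) == 5 * 1000);      /* reported as "> 4 us" */
        assert(calc_l1_acceptable(0x7) == -1U);         /* endpoint accepts any latency */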
306static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, 291static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
307 unsigned int *l0s, unsigned int *l1, unsigned int *enabled) 292 u32 *l0s, u32 *l1, u32 *enabled)
308{ 293{
309 int pos; 294 int pos;
310 u16 reg16; 295 u16 reg16;
311 u32 reg32; 296 u32 reg32, encoding;
312 unsigned int latency;
313 297
298 *l0s = *l1 = *enabled = 0;
314 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 299 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
315 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); 300 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
316 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 301 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
317 if (*state != PCIE_LINK_STATE_L0S && 302 if (*state != PCIE_LINK_STATE_L0S &&
318 *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S)) 303 *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
319 *state = 0; 304 *state = 0;
320 if (*state == 0) 305 if (*state == 0)
321 return; 306 return;
322 307
323 latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 308 encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
324 *l0s = calc_L0S_latency(latency, 0); 309 *l0s = calc_l0s_latency(encoding);
325 if (*state & PCIE_LINK_STATE_L1) { 310 if (*state & PCIE_LINK_STATE_L1) {
326 latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 311 encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
327 *l1 = calc_L1_latency(latency, 0); 312 *l1 = calc_l1_latency(encoding);
328 } 313 }
329 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 314 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
330 *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1); 315 *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
331} 316}
332 317
333static void pcie_aspm_cap_init(struct pci_dev *pdev) 318static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
334{ 319{
335 struct pci_dev *child_dev; 320 u32 support, l0s, l1, enabled;
336 u32 state, tmp; 321 struct pci_dev *child, *parent = link->pdev;
337 struct pcie_link_state *link_state = pdev->link_state; 322 struct pci_bus *linkbus = parent->subordinate;
323
324 if (blacklist) {
325 /* Set support state to 0, so we will disable ASPM later */
326 link->aspm_support = 0;
327 link->aspm_default = 0;
328 link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
329 return;
330 }
331
332 /* Configure common clock before checking latencies */
333 pcie_aspm_configure_common_clock(link);
338 334
339 /* upstream component states */ 335 /* upstream component states */
340 pcie_aspm_get_cap_device(pdev, &link_state->support_state, 336 pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
341 &link_state->l0s_upper_latency, 337 link->aspm_support = support;
342 &link_state->l1_upper_latency, 338 link->latency.l0s = l0s;
343 &link_state->enabled_state); 339 link->latency.l1 = l1;
340 link->aspm_enabled = enabled;
341
344 /* downstream component states, all functions have the same setting */ 342 /* downstream component states, all functions have the same setting */
345 child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, 343 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
346 bus_list); 344 pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
347 pcie_aspm_get_cap_device(child_dev, &state, 345 link->aspm_support &= support;
348 &link_state->l0s_down_latency, 346 link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
349 &link_state->l1_down_latency, 347 link->latency.l1 = max_t(u32, link->latency.l1, l1);
350 &tmp); 348
351 link_state->support_state &= state; 349 if (!link->aspm_support)
352 if (!link_state->support_state)
353 return; 350 return;
354 link_state->enabled_state &= link_state->support_state; 351
355 link_state->bios_aspm_state = link_state->enabled_state; 352 link->aspm_enabled &= link->aspm_support;
353 link->aspm_default = link->aspm_enabled;
356 354
 357 /* ENDPOINT states */ 355 /* ENDPOINT states */
358 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 356 list_for_each_entry(child, &linkbus->devices, bus_list) {
359 int pos; 357 int pos;
360 u32 reg32; 358 u32 reg32, encoding;
361 unsigned int latency; 359 struct aspm_latency *acceptable =
362 struct endpoint_state *ep_state = 360 &link->acceptable[PCI_FUNC(child->devfn)];
363 &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
364 361
365 if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && 362 if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
366 child_dev->pcie_type != PCI_EXP_TYPE_LEG_END) 363 child->pcie_type != PCI_EXP_TYPE_LEG_END)
367 continue; 364 continue;
368 365
369 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 366 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
370 pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32); 367 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
371 latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 368 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
372 latency = calc_L0S_latency(latency, 1); 369 acceptable->l0s = calc_l0s_acceptable(encoding);
373 ep_state->l0s_acceptable_latency = latency; 370 if (link->aspm_support & PCIE_LINK_STATE_L1) {
374 if (link_state->support_state & PCIE_LINK_STATE_L1) { 371 encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
375 latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; 372 acceptable->l1 = calc_l1_acceptable(encoding);
376 latency = calc_L1_latency(latency, 1);
377 ep_state->l1_acceptable_latency = latency;
378 } 373 }
379 } 374 }
380} 375}
381 376
382static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev, 377/**
383 unsigned int state) 378 * __pcie_aspm_check_state_one - check latency for endpoint device.
384{ 379 * @endpoint: pointer to the struct pci_dev of endpoint device
385 struct pci_dev *parent_dev, *tmp_dev; 380 *
 386 unsigned int latency, l1_latency = 0; 381 * TBD: The latency from the endpoint to the root complex varies with
 387 struct pcie_link_state *link_state; 382 * each switch's upstream link state above the device. Here we just do
 388 struct endpoint_state *ep_state; 383 * a simple check which assumes all links above the device can be in L1
 389 384 * state, that is, we consider the worst case. If a switch's upstream
 390 parent_dev = pdev->bus->self; 385 * link can't be put into L0S/L1, then our check is too strict.
391 link_state = parent_dev->link_state; 386 */
392 state &= link_state->support_state; 387static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
393 if (state == 0) 388{
394 return 0; 389 u32 l1_switch_latency = 0;
395 ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)]; 390 struct aspm_latency *acceptable;
396 391 struct pcie_link_state *link;
397 /* 392
398 * Check latency for endpoint device. 393 link = endpoint->bus->self->link_state;
399 * TBD: The latency from the endpoint to root complex vary per 394 state &= link->aspm_support;
400 * switch's upstream link state above the device. Here we just do a 395 acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
401 * simple check which assumes all links above the device can be in L1 396
402 * state, that is we just consider the worst case. If switch's upstream 397 while (link && state) {
403 * link can't be put into L0S/L1, then our check is too strictly. 398 if ((state & PCIE_LINK_STATE_L0S) &&
404 */ 399 (link->latency.l0s > acceptable->l0s))
405 tmp_dev = pdev; 400 state &= ~PCIE_LINK_STATE_L0S;
406 while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { 401 if ((state & PCIE_LINK_STATE_L1) &&
407 parent_dev = tmp_dev->bus->self; 402 (link->latency.l1 + l1_switch_latency > acceptable->l1))
408 link_state = parent_dev->link_state; 403 state &= ~PCIE_LINK_STATE_L1;
409 if (state & PCIE_LINK_STATE_L0S) { 404 link = link->parent;
410 latency = max_t(unsigned int, 405 /*
 411 link_state->l0s_upper_latency, 406 * Every switch on the path to the root complex needs 1
412 link_state->l0s_down_latency); 407 * more microsecond for L1. Spec doesn't mention L0s.
413 if (latency > ep_state->l0s_acceptable_latency) 408 */
414 state &= ~PCIE_LINK_STATE_L0S; 409 l1_switch_latency += 1000;
415 }
416 if (state & PCIE_LINK_STATE_L1) {
417 latency = max_t(unsigned int,
418 link_state->l1_upper_latency,
419 link_state->l1_down_latency);
420 if (latency + l1_latency >
421 ep_state->l1_acceptable_latency)
422 state &= ~PCIE_LINK_STATE_L1;
423 }
424 if (!parent_dev->bus->self) /* parent_dev is a root port */
425 break;
426 else {
427 /*
428 * parent_dev is the downstream port of a switch, make
429 * tmp_dev the upstream port of the switch
430 */
431 tmp_dev = parent_dev->bus->self;
432 /*
433 * every switch on the path to root complex need 1 more
434 * microsecond for L1. Spec doesn't mention L0S.
435 */
436 if (state & PCIE_LINK_STATE_L1)
437 l1_latency += 1000;
438 }
439 } 410 }
440 return state; 411 return state;
441} 412}
442 413
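A worked example of the walk above: take an endpoint that accepts 8 us of L1 exit latency, sitting behind one switch. On the first link checked (the switch's downstream port link), suppose latency.l1 is 4000 ns: 4000 + 0 <= 8000, so L1 survives and l1_switch_latency becomes 1000. On the parent link (the root port), suppose latency.l1 is 8000 ns: 8000 + 1000 > 8000, so PCIE_LINK_STATE_L1 is cleared for the whole path. The extra 1000 ns per hop is the one-microsecond-per-switch penalty the comment cites.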
443static unsigned int pcie_aspm_check_state(struct pci_dev *pdev, 414static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
444 unsigned int state)
445{ 415{
446 struct pci_dev *child_dev; 416 pci_power_t power_state;
417 struct pci_dev *child;
418 struct pci_bus *linkbus = link->pdev->subordinate;
447 419
448 /* If no child, ignore the link */ 420 /* If no child, ignore the link */
449 if (list_empty(&pdev->subordinate->devices)) 421 if (list_empty(&linkbus->devices))
450 return state; 422 return state;
451 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 423
452 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 424 list_for_each_entry(child, &linkbus->devices, bus_list) {
453 /* 425 /*
454 * If downstream component of a link is pci bridge, we 426 * If downstream component of a link is pci bridge, we
455 * disable ASPM for now for the link 427 * disable ASPM for now for the link
456 * */ 428 */
457 state = 0; 429 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
458 break; 430 return 0;
459 } 431
460 if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && 432 if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
461 child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)) 433 child->pcie_type != PCI_EXP_TYPE_LEG_END))
462 continue; 434 continue;
463 /* Device not in D0 doesn't need check latency */ 435 /* Device not in D0 doesn't need check latency */
464 if (child_dev->current_state == PCI_D1 || 436 power_state = child->current_state;
465 child_dev->current_state == PCI_D2 || 437 if (power_state == PCI_D1 || power_state == PCI_D2 ||
466 child_dev->current_state == PCI_D3hot || 438 power_state == PCI_D3hot || power_state == PCI_D3cold)
467 child_dev->current_state == PCI_D3cold)
468 continue; 439 continue;
469 state = __pcie_aspm_check_state_one(child_dev, state); 440 state = __pcie_aspm_check_state_one(child, state);
470 } 441 }
471 return state; 442 return state;
472} 443}
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
482 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 453 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
483} 454}
484 455
485static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) 456static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
486{ 457{
487 struct pci_dev *child_dev; 458 struct pci_dev *child, *parent = link->pdev;
488 int valid = 1; 459 struct pci_bus *linkbus = parent->subordinate;
489 struct pcie_link_state *link_state = pdev->link_state;
490 460
491 /* If no child, disable the link */ 461 /* If no child, disable the link */
492 if (list_empty(&pdev->subordinate->devices)) 462 if (list_empty(&linkbus->devices))
493 state = 0; 463 state = 0;
494 /* 464 /*
 495 * if the downstream component has pci bridge function, don't do ASPM 465 * If the downstream component has a pci bridge function, don't
496 * now 466 * do ASPM now.
497 */ 467 */
498 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 468 list_for_each_entry(child, &linkbus->devices, bus_list) {
499 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 469 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
500 valid = 0; 470 return;
501 break;
502 }
503 } 471 }
504 if (!valid)
505 return;
506
507 /* 472 /*
508 * spec 2.0 suggests all functions should be configured the same 473 * Spec 2.0 suggests all functions should be configured the
509 * setting for ASPM. Enabling ASPM L1 should be done in upstream 474 * same setting for ASPM. Enabling ASPM L1 should be done in
510 * component first and then downstream, and vice versa for disabling 475 * upstream component first and then downstream, and vice
511 * ASPM L1. Spec doesn't mention L0S. 476 * versa for disabling ASPM L1. Spec doesn't mention L0S.
512 */ 477 */
513 if (state & PCIE_LINK_STATE_L1) 478 if (state & PCIE_LINK_STATE_L1)
514 __pcie_aspm_config_one_dev(pdev, state); 479 __pcie_aspm_config_one_dev(parent, state);
515 480
516 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) 481 list_for_each_entry(child, &linkbus->devices, bus_list)
517 __pcie_aspm_config_one_dev(child_dev, state); 482 __pcie_aspm_config_one_dev(child, state);
518 483
519 if (!(state & PCIE_LINK_STATE_L1)) 484 if (!(state & PCIE_LINK_STATE_L1))
520 __pcie_aspm_config_one_dev(pdev, state); 485 __pcie_aspm_config_one_dev(parent, state);
521 486
522 link_state->enabled_state = state; 487 link->aspm_enabled = state;
523} 488}
524 489
525static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) 490/* Check the whole hierarchy, and configure each link in the hierarchy */
491static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
492 u32 state)
526{ 493{
527 struct pcie_link_state *root_port_link = link; 494 struct pcie_link_state *leaf, *root = link->root;
528 while (root_port_link->parent)
529 root_port_link = root_port_link->parent;
530 return root_port_link;
531}
532 495
533/* check the whole hierarchy, and configure each link in the hierarchy */ 496 state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
534static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
535 unsigned int state)
536{
537 struct pcie_link_state *link_state = pdev->link_state;
538 struct pcie_link_state *root_port_link = get_root_port_link(link_state);
539 struct pcie_link_state *leaf;
540 497
541 state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 498 /* Check all links who have specific root port link */
542 499 list_for_each_entry(leaf, &link_list, sibling) {
543 /* check all links who have specific root port link */ 500 if (!list_empty(&leaf->children) || (leaf->root != root))
544 list_for_each_entry(leaf, &link_list, sibiling) {
545 if (!list_empty(&leaf->children) ||
546 get_root_port_link(leaf) != root_port_link)
547 continue; 501 continue;
548 state = pcie_aspm_check_state(leaf->pdev, state); 502 state = pcie_aspm_check_state(leaf, state);
549 } 503 }
550 /* check root port link too in case it hasn't children */ 504 /* Check root port link too in case it hasn't children */
551 state = pcie_aspm_check_state(root_port_link->pdev, state); 505 state = pcie_aspm_check_state(root, state);
552 506 if (link->aspm_enabled == state)
553 if (link_state->enabled_state == state)
554 return; 507 return;
555
556 /* 508 /*
557 * we must change the hierarchy. See comments in 509 * We must change the hierarchy. See comments in
558 * __pcie_aspm_config_link for the order 510 * __pcie_aspm_config_link for the order
559 **/ 511 **/
560 if (state & PCIE_LINK_STATE_L1) { 512 if (state & PCIE_LINK_STATE_L1) {
561 list_for_each_entry(leaf, &link_list, sibiling) { 513 list_for_each_entry(leaf, &link_list, sibling) {
562 if (get_root_port_link(leaf) == root_port_link) 514 if (leaf->root == root)
563 __pcie_aspm_config_link(leaf->pdev, state); 515 __pcie_aspm_config_link(leaf, state);
564 } 516 }
565 } else { 517 } else {
566 list_for_each_entry_reverse(leaf, &link_list, sibiling) { 518 list_for_each_entry_reverse(leaf, &link_list, sibling) {
567 if (get_root_port_link(leaf) == root_port_link) 519 if (leaf->root == root)
568 __pcie_aspm_config_link(leaf->pdev, state); 520 __pcie_aspm_config_link(leaf, state);
569 } 521 }
570 } 522 }
571} 523}
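The ordering rule spelled out in the comment above is easy to get backwards, so here it is reduced to its skeleton. The types and helper below are invented for illustration; only the ordering mirrors the hunk: program the upstream end first when L1 is being enabled, and last when it is being disabled.

#define LINK_STATE_L1 0x2

struct port { unsigned int lnkctl; };	/* stand-in for one end of the link */

static void program_aspm(struct port *p, unsigned int state)
{
	p->lnkctl = state;	/* models the LNKCTL register write */
}

static void config_link(struct port *upstream, struct port *downstream[],
			int n, unsigned int state)
{
	int i;

	if (state & LINK_STATE_L1)		/* enabling L1: upstream first */
		program_aspm(upstream, state);
	for (i = 0; i < n; i++)
		program_aspm(downstream[i], state);
	if (!(state & LINK_STATE_L1))		/* disabling L1: upstream last */
		program_aspm(upstream, state);
}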
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
  * pcie_aspm_configure_link_state: enable/disable PCI express link state
  * @pdev: the root port or switch downstream port
  */
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
-	unsigned int state)
+static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
+					   u32 state)
 {
 	down_read(&pci_bus_sem);
 	mutex_lock(&aspm_lock);
-	__pcie_aspm_configure_link_state(pdev, state);
+	__pcie_aspm_configure_link_state(link, state);
 	mutex_unlock(&aspm_lock);
 	up_read(&pci_bus_sem);
 }
 
-static void free_link_state(struct pci_dev *pdev)
+static void free_link_state(struct pcie_link_state *link)
 {
-	kfree(pdev->link_state);
-	pdev->link_state = NULL;
+	link->pdev->link_state = NULL;
+	kfree(link);
 }
 
 static int pcie_aspm_sanity_check(struct pci_dev *pdev)
 {
-	struct pci_dev *child_dev;
-	int child_pos;
+	struct pci_dev *child;
+	int pos;
 	u32 reg32;
-
 	/*
-	 * Some functions in a slot might not all be PCIE functions, very
-	 * strange. Disable ASPM for the whole slot
+	 * Some functions in a slot might not all be PCIE functions,
+	 * very strange. Disable ASPM for the whole slot
 	 */
-	list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
-		child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
-		if (!child_pos)
+	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+		pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		if (!pos)
 			return -EINVAL;
-
 		/*
 		 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
 		 * RBER bit to determine if a function is 1.1 version device
 		 */
-		pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
-			&reg32);
+		pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
 		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
-			dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+			dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
 				   " on pre-1.1 PCIe device. You can enable it"
 				   " with 'pcie_aspm=force'\n");
 			return -EINVAL;
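The pre-1.1 test used above fits in a few lines: PCIe 1.1 made the Role-Based Error Reporting bit in the Device Capabilities register mandatory, so a clear RBER bit is taken as evidence of a pre-1.1 part. A minimal sketch, assuming <linux/pci.h> and an express-capability offset already located with pci_find_capability():

static int pcie_dev_is_pre_1_1(struct pci_dev *pdev, int pos)
{
	u32 reg32;

	/* RBER is mandatory from PCIe 1.1 on; its absence flags an old part */
	pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &reg32);
	return !(reg32 & PCI_EXP_DEVCAP_RBER);
}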
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
 	return 0;
 }
 
+static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+{
+	struct pcie_link_state *link;
+	int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return NULL;
+	INIT_LIST_HEAD(&link->sibling);
+	INIT_LIST_HEAD(&link->children);
+	INIT_LIST_HEAD(&link->link);
+	link->pdev = pdev;
+	if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+		struct pcie_link_state *parent;
+		parent = pdev->bus->parent->self->link_state;
+		if (!parent) {
+			kfree(link);
+			return NULL;
+		}
+		link->parent = parent;
+		list_add(&link->link, &parent->children);
+	}
+	/* Setup a pointer to the root port link */
+	if (!link->parent)
+		link->root = link;
+	else
+		link->root = link->parent->root;
+
+	list_add(&link->sibling, &link_list);
+
+	pdev->link_state = link;
+
+	/* Check ASPM capability */
+	pcie_aspm_cap_init(link, blacklist);
+
+	/* Check Clock PM capability */
+	pcie_clkpm_cap_init(link, blacklist);
+
+	return link;
+}
+
 /*
  * pcie_aspm_init_link_state: Initiate PCI express link state.
  * It is called after the pcie and its children devices are scaned.
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
  */
 void pcie_aspm_init_link_state(struct pci_dev *pdev)
 {
-	unsigned int state;
-	struct pcie_link_state *link_state;
-	int error = 0;
-	int blacklist;
+	u32 state;
+	struct pcie_link_state *link;
 
 	if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
 		return;
 	if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
 	    pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+		return;
+
+	/* VIA has a strange chipset, root port is under a bridge */
+	if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+	    pdev->bus->self)
 		return;
+
 	down_read(&pci_bus_sem);
 	if (list_empty(&pdev->subordinate->devices))
 		goto out;
 
-	blacklist = !!pcie_aspm_sanity_check(pdev);
-
 	mutex_lock(&aspm_lock);
-
-	link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
-	if (!link_state)
-		goto unlock_out;
-
-	link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
-	INIT_LIST_HEAD(&link_state->children);
-	INIT_LIST_HEAD(&link_state->link);
-	if (pdev->bus->self) {/* this is a switch */
-		struct pcie_link_state *parent_link_state;
-
-		parent_link_state = pdev->bus->parent->self->link_state;
-		if (!parent_link_state) {
-			kfree(link_state);
-			goto unlock_out;
-		}
-		list_add(&link_state->link, &parent_link_state->children);
-		link_state->parent = parent_link_state;
-	}
-
-	pdev->link_state = link_state;
-
-	if (!blacklist) {
-		pcie_aspm_configure_common_clock(pdev);
-		pcie_aspm_cap_init(pdev);
+	link = pcie_aspm_setup_link_state(pdev);
+	if (!link)
+		goto unlock;
+	/*
+	 * Setup initial ASPM state
+	 *
+	 * If link has switch, delay the link config. The leaf link
+	 * initialization will config the whole hierarchy. But we must
+	 * make sure BIOS doesn't set unsupported link state.
+	 */
+	if (pcie_aspm_downstream_has_switch(link)) {
+		state = pcie_aspm_check_state(link, link->aspm_default);
+		__pcie_aspm_config_link(link, state);
 	} else {
-		link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-		link_state->bios_aspm_state = 0;
-		/* Set support state to 0, so we will disable ASPM later */
-		link_state->support_state = 0;
+		state = policy_to_aspm_state(link);
+		__pcie_aspm_configure_link_state(link, state);
 	}
 
-	link_state->pdev = pdev;
-	list_add(&link_state->sibiling, &link_list);
-
-	if (link_state->downstream_has_switch) {
-		/*
-		 * If link has switch, delay the link config. The leaf link
-		 * initialization will config the whole hierarchy. but we must
-		 * make sure BIOS doesn't set unsupported link state
-		 **/
-		state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
-		__pcie_aspm_config_link(pdev, state);
-	} else
-		__pcie_aspm_configure_link_state(pdev,
-			policy_to_aspm_state(pdev));
-
-	pcie_check_clock_pm(pdev, blacklist);
-
-unlock_out:
-	if (error)
-		free_link_state(pdev);
+	/* Setup initial Clock PM state */
+	state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
+	pcie_set_clkpm(link, state);
+unlock:
 	mutex_unlock(&aspm_lock);
 out:
 	up_read(&pci_bus_sem);
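Condensed, the init-time decision this hunk makes looks like the sketch below, using the helpers named in the patch itself (it is not compilable outside aspm.c): a link with a switch below it is only clamped to a state the hardware supports, leaving the full policy walk to the leaf links, while a plain link applies the policy to its whole hierarchy immediately.

static void setup_initial_aspm(struct pcie_link_state *link)
{
	u32 state;

	if (pcie_aspm_downstream_has_switch(link)) {
		/* clamp the BIOS default to what the link supports */
		state = pcie_aspm_check_state(link, link->aspm_default);
		__pcie_aspm_config_link(link, state);	/* this link only */
	} else {
		/* leaf link: apply the policy across the hierarchy */
		state = policy_to_aspm_state(link);
		__pcie_aspm_configure_link_state(link, state);
	}
}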
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 
 	/* All functions are removed, so just disable ASPM for the link */
 	__pcie_aspm_config_one_dev(parent, 0);
-	list_del(&link_state->sibiling);
+	list_del(&link_state->sibling);
 	list_del(&link_state->link);
 	/* Clock PM is for endpoint device */
 
-	free_link_state(parent);
+	free_link_state(link_state);
 out:
 	mutex_unlock(&aspm_lock);
 	up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
 	 * devices changed PM state, we should recheck if latency meets all
 	 * functions' requirement
 	 */
-	pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
+	pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
 }
 
 /*
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
 	down_read(&pci_bus_sem);
 	mutex_lock(&aspm_lock);
 	link_state = parent->link_state;
-	link_state->support_state &=
-		~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
-	if (state & PCIE_LINK_STATE_CLKPM)
-		link_state->clk_pm_capable = 0;
-
-	__pcie_aspm_configure_link_state(parent, link_state->enabled_state);
-	if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
-		pcie_set_clock_pm(parent, 0);
+	link_state->aspm_support &= ~state;
+	__pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+	if (state & PCIE_LINK_STATE_CLKPM) {
+		link_state->clkpm_capable = 0;
+		pcie_set_clkpm(link_state, 0);
+	}
 	mutex_unlock(&aspm_lock);
 	up_read(&pci_bus_sem);
 }
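For reference, this is how a driver consumes the export touched above: the call below opts a device's link out of L0s and L1 at probe time. The driver name is hypothetical; pci_disable_link_state() and the PCIE_LINK_STATE_* flags are the interface this file actually provides.

#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* hypothetical erratum: this part misbehaves in L0s and L1 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
	return 0;
}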
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state);
 static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
 {
 	int i;
-	struct pci_dev *pdev;
 	struct pcie_link_state *link_state;
 
 	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
 	down_read(&pci_bus_sem);
 	mutex_lock(&aspm_lock);
 	aspm_policy = i;
-	list_for_each_entry(link_state, &link_list, sibiling) {
-		pdev = link_state->pdev;
-		__pcie_aspm_configure_link_state(pdev,
-			policy_to_aspm_state(pdev));
-		if (link_state->clk_pm_capable &&
-		    link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
-			pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
+	list_for_each_entry(link_state, &link_list, sibling) {
+		__pcie_aspm_configure_link_state(link_state,
+						 policy_to_aspm_state(link_state));
+		pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
 	}
 	mutex_unlock(&aspm_lock);
 	up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev,
 	struct pci_dev *pci_device = to_pci_dev(dev);
 	struct pcie_link_state *link_state = pci_device->link_state;
 
-	return sprintf(buf, "%d\n", link_state->enabled_state);
+	return sprintf(buf, "%d\n", link_state->aspm_enabled);
 }
 
 static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev,
 			       const char *buf,
 			       size_t n)
 {
-	struct pci_dev *pci_device = to_pci_dev(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 	int state;
 
 	if (n < 1)
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev,
 	state = buf[0]-'0';
 	if (state >= 0 && state <= 3) {
 		/* setup link aspm state */
-		pcie_aspm_configure_link_state(pci_device, state);
+		pcie_aspm_configure_link_state(pdev->link_state, state);
 		return n;
 	}
 
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev,
 	struct pci_dev *pci_device = to_pci_dev(dev);
 	struct pcie_link_state *link_state = pci_device->link_state;
 
-	return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
+	return sprintf(buf, "%d\n", link_state->clkpm_enabled);
 }
 
 static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev,
 			     const char *buf,
 			     size_t n)
 {
-	struct pci_dev *pci_device = to_pci_dev(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 	int state;
 
 	if (n < 1)
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev,
 
 	down_read(&pci_bus_sem);
 	mutex_lock(&aspm_lock);
-	pcie_set_clock_pm(pci_device, !!state);
+	pcie_set_clkpm_nocheck(pdev->link_state, !!state);
 	mutex_unlock(&aspm_lock);
 	up_read(&pci_bus_sem);
 
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
 	    pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
 		return;
 
-	if (link_state->support_state)
+	if (link_state->aspm_support)
 		sysfs_add_file_to_group(&pdev->dev.kobj,
 					&dev_attr_link_state.attr, power_group);
-	if (link_state->clk_pm_capable)
+	if (link_state->clkpm_capable)
 		sysfs_add_file_to_group(&pdev->dev.kobj,
 					&dev_attr_clk_ctl.attr, power_group);
 }
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
 	    pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
 		return;
 
-	if (link_state->support_state)
+	if (link_state->aspm_support)
 		sysfs_remove_file_from_group(&pdev->dev.kobj,
 					     &dev_attr_link_state.attr, power_group);
-	if (link_state->clk_pm_capable)
+	if (link_state->clkpm_capable)
 		sysfs_remove_file_from_group(&pdev->dev.kobj,
 					     &dev_attr_clk_ctl.attr, power_group);
 }
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1ae2475ffff..40e75f6a5056 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
 	if (type == pci_bar_io) {
 		l &= PCI_BASE_ADDRESS_IO_MASK;
-		mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+		mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
 	} else {
 		l &= PCI_BASE_ADDRESS_MEM_MASK;
 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 			dev_printk(KERN_DEBUG, &dev->dev,
 				   "reg %x 64bit mmio: %pR\n", pos, res);
 		}
+
+		res->flags |= IORESOURCE_MEM_64;
 	} else {
 		sz = pci_size(l, sz, mask);
 
@@ -287,7 +289,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 	struct resource *res;
 	int i;
 
-	if (!child->parent)	/* It's a host bus, nothing to read */
+	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
 		return;
 
 	if (dev->transparent) {
@@ -362,7 +364,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		}
 	}
 	if (base <= limit) {
-		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+			     IORESOURCE_MEM | IORESOURCE_PREFETCH;
+		if (res->flags & PCI_PREF_RANGE_TYPE_64)
+			res->flags |= IORESOURCE_MEM_64;
 		res->start = base;
 		res->end = limit + 0xfffff;
 		dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
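Both probe.c hunks hinge on the same encoding: bits 2:1 of a memory BAR (and of the bridge's prefetchable-base register) give the address width, with the value 0x2 meaning a 64-bit decoder. A minimal sketch of that test, using the real macros from <linux/pci_regs.h>:

static int bar_is_64bit(u32 bar)
{
	if ((bar & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY)
		return 0;	/* I/O BARs are never 64-bit */
	return (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
	       PCI_BASE_ADDRESS_MEM_TYPE_64;
}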
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bd4253f93d5a..56552d74abea 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1133,6 +1133,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
 			switch (dev->subsystem_device) {
 			case 0x1751: /* M2N notebook */
 			case 0x1821: /* M5N notebook */
+			case 0x1897: /* A6L notebook */
 				asus_hides_smbus = 1;
 			}
 		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1163,6 +1164,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
 			switch (dev->subsystem_device) {
 			case 0x12bc: /* HP D330L */
 			case 0x12bd: /* HP D530 */
+			case 0x006a: /* HP Compaq nx9500 */
 				asus_hides_smbus = 1;
 			}
 		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -2016,6 +2018,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
 			PCI_DEVICE_ID_NX2_5709S,
 			quirk_brcm_570x_limit_vpd);
 
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+	u8 reg;
+
+	if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+		dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+		pci_write_config_byte(dev, 0xF4, reg | 0x02);
+	}
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+			quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+			quirk_unhide_mch_dev6);
+
+
 #ifdef CONFIG_PCI_MSI
 /* Some chipsets do not support MSI. We cannot easily rely on setting
  * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
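The shape of such a quirk generalizes: a hook taking the struct pci_dev, plus a DECLARE_PCI_FIXUP_* registration naming the vendor and device it applies to, where the phase (EARLY here) picks the point in enumeration at which the hook runs. A skeleton under assumed IDs (the device ID below is made up; the macro and hook signature are the real interface):

static void __devinit quirk_example(struct pci_dev *dev)
{
	/* poke configuration space, adjust flags, etc. */
	dev_info(&dev->dev, "example quirk applied\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);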
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 86503c14ce7e..176615e7231f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@ static void pci_stop_dev(struct pci_dev *dev)
 
 static void pci_destroy_dev(struct pci_dev *dev)
 {
-	pci_stop_dev(dev);
-
 	/* Remove the device from the device lists, and prevent any further
 	 * list accesses from this device */
 	down_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 710d4ea69568..e8cb5051c311 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
 	if (pdev->is_pcie)
 		return NULL;
 	while (1) {
-		if (!pdev->bus->parent)
+		if (pci_is_root_bus(pdev->bus))
 			break;
 		pdev = pdev->bus->self;
 		/* a p2p bridge */
@@ -115,36 +115,6 @@ pci_find_next_bus(const struct pci_bus *from)
 
 #ifdef CONFIG_PCI_LEGACY
 /**
- * pci_find_slot - locate PCI device from a given PCI slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned.
- *
- * NOTE: Do not use this function any more; use pci_get_slot() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{
-	struct pci_dev *dev = NULL;
-
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-		if (dev->bus->number == bus && dev->devfn == devfn) {
-			pci_dev_put(dev);
-			return dev;
-		}
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(pci_find_slot);
-
-/**
  * pci_find_device - begin or continue searching for a PCI device by vendor/device id
  * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
  * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
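pci_find_slot() is dropped here because it returned an unreferenced pointer that hot-unplug could invalidate at any moment (its own implementation even dropped the reference before returning). The replacement pattern uses the real refcounted API: look the device up via its bus with pci_get_slot(), which takes a reference, and release it with pci_dev_put() when done. The devfn value below is only an example.

static void example_lookup(struct pci_bus *bus)
{
	struct pci_dev *dev = pci_get_slot(bus, PCI_DEVFN(2, 0));

	if (!dev)
		return;
	/* ... use dev; the held reference keeps it valid ... */
	pci_dev_put(dev);
}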
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a00f85471b6e..b636e245445d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -58,7 +58,6 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
 		res = list->res;
 		idx = res - &list->dev->resource[0];
 		if (pci_assign_resource(list->dev, idx)) {
-			/* FIXME: get rid of this */
 			res->start = 0;
 			res->end = 0;
 			res->flags = 0;
@@ -143,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
 	struct pci_dev *bridge = bus->self;
 	struct pci_bus_region region;
 	u32 l, bu, lu, io_upper16;
+	int pref_mem64;
 
 	if (pci_is_enabled(bridge))
 		return;
@@ -198,16 +198,22 @@ static void pci_setup_bridge(struct pci_bus *bus)
 	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
 
 	/* Set up PREF base/limit. */
+	pref_mem64 = 0;
 	bu = lu = 0;
 	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
 	if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
+		int width = 8;
 		l = (region.start >> 16) & 0xfff0;
 		l |= region.end & 0xfff00000;
-		bu = upper_32_bits(region.start);
-		lu = upper_32_bits(region.end);
-		dev_info(&bridge->dev, "  PREFETCH window: %#016llx-%#016llx\n",
-			 (unsigned long long)region.start,
-			 (unsigned long long)region.end);
+		if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+			pref_mem64 = 1;
+			bu = upper_32_bits(region.start);
+			lu = upper_32_bits(region.end);
+			width = 16;
+		}
+		dev_info(&bridge->dev, "  PREFETCH window: %#0*llx-%#0*llx\n",
+			 width, (unsigned long long)region.start,
+			 width, (unsigned long long)region.end);
 	}
 	else {
 		l = 0x0000fff0;
@@ -215,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
 	}
 	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
 
-	/* Set the upper 32 bits of PREF base & limit. */
-	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
-	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+	if (pref_mem64) {
+		/* Set the upper 32 bits of PREF base & limit. */
+		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+		pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+	}
 
 	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
@@ -255,8 +263,25 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
 		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
 		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
 	}
-	if (pmem)
+	if (pmem) {
 		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+			b_res[2].flags |= IORESOURCE_MEM_64;
+	}
+
+	/* double check if bridge does support 64 bit pref */
+	if (b_res[2].flags & IORESOURCE_MEM_64) {
+		u32 mem_base_hi, tmp;
+		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+				      &mem_base_hi);
+		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+				       0xffffffff);
+		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+		if (!tmp)
+			b_res[2].flags &= ~IORESOURCE_MEM_64;
+		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+				       mem_base_hi);
+	}
 }
 
 /* Helper function for sizing routines: find first available
@@ -336,6 +361,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
 	int order, max_order;
 	struct resource *b_res = find_free_bus_resource(bus, type);
+	unsigned int mem64_mask = 0;
 
 	if (!b_res)
 		return 0;
@@ -344,9 +370,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 	max_order = 0;
 	size = 0;
 
+	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
+	b_res->flags &= ~IORESOURCE_MEM_64;
+
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		int i;
 
 		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 			struct resource *r = &dev->resource[i];
 			resource_size_t r_size;
@@ -372,6 +401,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 			aligns[order] += align;
 			if (order > max_order)
 				max_order = order;
+			mem64_mask &= r->flags & IORESOURCE_MEM_64;
 		}
 	}
 
@@ -396,6 +426,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 	b_res->start = min_align;
 	b_res->end = size + min_align - 1;
 	b_res->flags |= IORESOURCE_STARTALIGN;
+	b_res->flags |= mem64_mask;
 	return 1;
 }
 
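The mem64_mask bookkeeping above is an all-or-nothing intersection: the bridge window may only keep IORESOURCE_MEM_64 if every resource routed through it can live above 4GB, so a single 32-bit-only BAR clears the flag for the whole window. The same logic in a standalone model (plain C, stand-in flag value rather than the kernel constant):

#define MEM_64 0x00100000	/* stand-in for IORESOURCE_MEM_64 */

static unsigned int window_mem64(const unsigned int *child_flags, int n)
{
	unsigned int mask = MEM_64;
	int i;

	for (i = 0; i < n; i++)
		mask &= child_flags[i] & MEM_64; /* any 32-bit child clears it */
	return mask;	/* MEM_64 if all children are 64-bit capable, else 0 */
}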
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 12403516776a..b711fb7181e2 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -135,23 +135,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_QUIRKS */
 
-int pci_assign_resource(struct pci_dev *dev, int resno)
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+				 int resno)
 {
-	struct pci_bus *bus = dev->bus;
 	struct resource *res = dev->resource + resno;
 	resource_size_t size, min, align;
 	int ret;
 
 	size = resource_size(res);
 	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
 	align = resource_alignment(res);
-	if (!align) {
-		dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
-			"alignment) %pR flags %#lx\n",
-			resno, res, res->flags);
-		return -EINVAL;
-	}
 
 	/* First, try exact prefetching match.. */
 	ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -169,10 +162,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 				pcibios_align_resource, dev);
 	}
 
-	if (ret) {
-		dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
-			 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
-	} else {
+	if (!ret) {
 		res->flags &= ~IORESOURCE_STARTALIGN;
 		if (resno < PCI_BRIDGE_RESOURCES)
 			pci_update_resource(dev, resno);
@@ -181,6 +171,39 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	return ret;
 }
 
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+	struct resource *res = dev->resource + resno;
+	resource_size_t align;
+	struct pci_bus *bus;
+	int ret;
+
+	align = resource_alignment(res);
+	if (!align) {
+		dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
+			"alignment) %pR flags %#lx\n",
+			resno, res, res->flags);
+		return -EINVAL;
+	}
+
+	bus = dev->bus;
+	while ((ret = __pci_assign_resource(bus, dev, resno))) {
+		if (bus->parent && bus->self->transparent)
+			bus = bus->parent;
+		else
+			bus = NULL;
+		if (bus)
+			continue;
+		break;
+	}
+
+	if (ret)
+		dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+			 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+
+	return ret;
+}
+
 #if 0
 int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
 {
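The split above turns pci_assign_resource() into a retry loop: if allocation fails within the device's own bus resources, it walks up through transparent bridges, whose windows forward everything, and retries against the parent bus. The same control flow restated without the nested early exits (a sketch using the patch's own names, not a drop-in replacement):

static int assign_with_fallback(struct pci_dev *dev, int resno)
{
	struct pci_bus *bus = dev->bus;
	int ret;

	while ((ret = __pci_assign_resource(bus, dev, resno)) != 0) {
		if (!bus->parent || !bus->self->transparent)
			break;		/* no transparent bridge above: give up */
		bus = bus->parent;	/* retry in the parent bus's windows */
	}
	return ret;
}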
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index fe95ce20bcbd..eddb0748b0ea 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -307,6 +307,45 @@ void pci_destroy_slot(struct pci_slot *slot)
 }
 EXPORT_SYMBOL_GPL(pci_destroy_slot);
 
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+	struct hotplug_slot *slot = pci_slot->hotplug;
+	struct kobject *kobj = NULL;
+	int no_warn;
+
+	if (!slot || !slot->ops)
+		return;
+	kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+	if (!kobj)
+		return;
+	no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+	kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+	sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
 static int pci_slot_init(void)
 {
 	struct kset *pci_bus_kset;
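The helper pair added here follows a small, reusable sysfs pattern: resolve the owning module's kobject by name in module_kset, create a "module" symlink pointing at it, and drop the reference kset_find_obj() took. Under pciehp, for instance, the result would be a link like /sys/bus/pci/slots/<n>/module (path illustrative). A minimal sketch of the same pattern for an arbitrary kobject, mirroring the file's best-effort idiom:

static void link_to_module(struct kobject *owner, const char *mod_name)
{
	struct kobject *kobj = kset_find_obj(module_kset, mod_name);
	int no_warn;

	if (!kobj)
		return;
	no_warn = sysfs_create_link(owner, kobj, "module"); /* best-effort */
	kobject_put(kobj);	/* kset_find_obj() returned a counted reference */
}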