aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-10-28 11:26:12 -0400
committerIngo Molnar <mingo@elte.hu>2008-10-28 11:26:12 -0400
commit7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
treee730a4565e0318140d2fbd2f0415d18a339d7336 /drivers/pci
parent41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'drivers/pci')
-rw-r--r--drivers/pci/Makefile5
-rw-r--r--drivers/pci/bus.c7
-rw-r--r--drivers/pci/dmar.c491
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c38
-rw-r--r--drivers/pci/hotplug/acpiphp.h11
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c32
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c20
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c4
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h6
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c75
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c4
-rw-r--r--drivers/pci/hotplug/cpqphp.h13
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c45
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/fakephp.c32
-rw-r--r--drivers/pci/hotplug/ibmphp.h5
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c113
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c78
-rw-r--r--drivers/pci/hotplug/pciehp.h27
-rw-r--r--drivers/pci/hotplug/pciehp_core.c100
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c144
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c264
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c35
-rw-r--r--drivers/pci/hotplug/rpaphp.h4
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c14
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c22
-rw-r--r--drivers/pci/hotplug/shpchp.h31
-rw-r--r--drivers/pci/hotplug/shpchp_core.c82
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c158
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c113
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c36
-rw-r--r--drivers/pci/htirq.c3
-rw-r--r--drivers/pci/intel-iommu.c530
-rw-r--r--drivers/pci/intel-iommu.h344
-rw-r--r--drivers/pci/intr_remapping.c512
-rw-r--r--drivers/pci/intr_remapping.h8
-rw-r--r--drivers/pci/iova.c2
-rw-r--r--drivers/pci/iova.h52
-rw-r--r--drivers/pci/irq.c60
-rw-r--r--drivers/pci/msi.c51
-rw-r--r--drivers/pci/pci-acpi.c92
-rw-r--r--drivers/pci/pci-driver.c21
-rw-r--r--drivers/pci/pci-sysfs.c260
-rw-r--r--drivers/pci/pci.c251
-rw-r--r--drivers/pci/pci.h28
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c6
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c7
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c47
-rw-r--r--drivers/pci/pcie/aspm.c32
-rw-r--r--drivers/pci/pcie/portdrv.h1
-rw-r--r--drivers/pci/pcie/portdrv_core.c23
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pci/probe.c406
-rw-r--r--drivers/pci/proc.c18
-rw-r--r--drivers/pci/quirks.c208
-rw-r--r--drivers/pci/remove.c11
-rw-r--r--drivers/pci/rom.c6
-rw-r--r--drivers/pci/search.c17
-rw-r--r--drivers/pci/setup-bus.c58
-rw-r--r--drivers/pci/setup-res.c42
-rw-r--r--drivers/pci/slot.c170
63 files changed, 3234 insertions, 2051 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 7d63f8ced24b..af3bfe22847b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -3,7 +3,8 @@
3# 3#
4 4
5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ 5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
7 irq.o
7obj-$(CONFIG_PROC_FS) += proc.o 8obj-$(CONFIG_PROC_FS) += proc.o
8 9
9# Build PCI Express stuff if needed 10# Build PCI Express stuff if needed
@@ -26,6 +27,8 @@ obj-$(CONFIG_HT_IRQ) += htirq.o
26# Build Intel IOMMU support 27# Build Intel IOMMU support
27obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o 28obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
28 29
30obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
31
29# 32#
30# Some architectures use the generic PCI setup functions 33# Some architectures use the generic PCI setup functions
31# 34#
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 529d9d7727b0..999cc4088b59 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -151,6 +151,13 @@ void pci_bus_add_devices(struct pci_bus *bus)
151 if (retval) 151 if (retval)
152 dev_err(&dev->dev, "Error creating cpuaffinity" 152 dev_err(&dev->dev, "Error creating cpuaffinity"
153 " file, continuing...\n"); 153 " file, continuing...\n");
154
155 retval = device_create_file(&child_bus->dev,
156 &dev_attr_cpulistaffinity);
157 if (retval)
158 dev_err(&dev->dev,
159 "Error creating cpulistaffinity"
160 " file, continuing...\n");
154 } 161 }
155 } 162 }
156} 163}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f941f609dbf3..691b3adeb870 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -19,15 +19,18 @@
19 * Author: Shaohua Li <shaohua.li@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * 21 *
22 * This file implements early detection/parsing of DMA Remapping Devices 22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI 23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables. 24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
25 */ 27 */
26 28
27#include <linux/pci.h> 29#include <linux/pci.h>
28#include <linux/dmar.h> 30#include <linux/dmar.h>
29#include "iova.h" 31#include <linux/iova.h>
30#include "intel-iommu.h" 32#include <linux/intel-iommu.h>
33#include <linux/timer.h>
31 34
32#undef PREFIX 35#undef PREFIX
33#define PREFIX "DMAR:" 36#define PREFIX "DMAR:"
@@ -37,7 +40,6 @@
37 * these units are not supported by the architecture. 40 * these units are not supported by the architecture.
38 */ 41 */
39LIST_HEAD(dmar_drhd_units); 42LIST_HEAD(dmar_drhd_units);
40LIST_HEAD(dmar_rmrr_units);
41 43
42static struct acpi_table_header * __initdata dmar_tbl; 44static struct acpi_table_header * __initdata dmar_tbl;
43 45
@@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
53 list_add(&drhd->list, &dmar_drhd_units); 55 list_add(&drhd->list, &dmar_drhd_units);
54} 56}
55 57
56static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
57{
58 list_add(&rmrr->list, &dmar_rmrr_units);
59}
60
61static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, 58static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
62 struct pci_dev **dev, u16 segment) 59 struct pci_dev **dev, u16 segment)
63{ 60{
@@ -172,19 +169,36 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
172 struct acpi_dmar_hardware_unit *drhd; 169 struct acpi_dmar_hardware_unit *drhd;
173 struct dmar_drhd_unit *dmaru; 170 struct dmar_drhd_unit *dmaru;
174 int ret = 0; 171 int ret = 0;
175 static int include_all;
176 172
177 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); 173 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
178 if (!dmaru) 174 if (!dmaru)
179 return -ENOMEM; 175 return -ENOMEM;
180 176
177 dmaru->hdr = header;
181 drhd = (struct acpi_dmar_hardware_unit *)header; 178 drhd = (struct acpi_dmar_hardware_unit *)header;
182 dmaru->reg_base_addr = drhd->address; 179 dmaru->reg_base_addr = drhd->address;
183 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ 180 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
184 181
182 ret = alloc_iommu(dmaru);
183 if (ret) {
184 kfree(dmaru);
185 return ret;
186 }
187 dmar_register_drhd_unit(dmaru);
188 return 0;
189}
190
191static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
192{
193 struct acpi_dmar_hardware_unit *drhd;
194 static int include_all;
195 int ret = 0;
196
197 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
198
185 if (!dmaru->include_all) 199 if (!dmaru->include_all)
186 ret = dmar_parse_dev_scope((void *)(drhd + 1), 200 ret = dmar_parse_dev_scope((void *)(drhd + 1),
187 ((void *)drhd) + header->length, 201 ((void *)drhd) + drhd->header.length,
188 &dmaru->devices_cnt, &dmaru->devices, 202 &dmaru->devices_cnt, &dmaru->devices,
189 drhd->segment); 203 drhd->segment);
190 else { 204 else {
@@ -197,37 +211,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
197 include_all = 1; 211 include_all = 1;
198 } 212 }
199 213
200 if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) 214 if (ret) {
215 list_del(&dmaru->list);
201 kfree(dmaru); 216 kfree(dmaru);
202 else 217 }
203 dmar_register_drhd_unit(dmaru);
204 return ret; 218 return ret;
205} 219}
206 220
221#ifdef CONFIG_DMAR
222LIST_HEAD(dmar_rmrr_units);
223
224static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
225{
226 list_add(&rmrr->list, &dmar_rmrr_units);
227}
228
229
207static int __init 230static int __init
208dmar_parse_one_rmrr(struct acpi_dmar_header *header) 231dmar_parse_one_rmrr(struct acpi_dmar_header *header)
209{ 232{
210 struct acpi_dmar_reserved_memory *rmrr; 233 struct acpi_dmar_reserved_memory *rmrr;
211 struct dmar_rmrr_unit *rmrru; 234 struct dmar_rmrr_unit *rmrru;
212 int ret = 0;
213 235
214 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); 236 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
215 if (!rmrru) 237 if (!rmrru)
216 return -ENOMEM; 238 return -ENOMEM;
217 239
240 rmrru->hdr = header;
218 rmrr = (struct acpi_dmar_reserved_memory *)header; 241 rmrr = (struct acpi_dmar_reserved_memory *)header;
219 rmrru->base_address = rmrr->base_address; 242 rmrru->base_address = rmrr->base_address;
220 rmrru->end_address = rmrr->end_address; 243 rmrru->end_address = rmrr->end_address;
244
245 dmar_register_rmrr_unit(rmrru);
246 return 0;
247}
248
249static int __init
250rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
251{
252 struct acpi_dmar_reserved_memory *rmrr;
253 int ret;
254
255 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
221 ret = dmar_parse_dev_scope((void *)(rmrr + 1), 256 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
222 ((void *)rmrr) + header->length, 257 ((void *)rmrr) + rmrr->header.length,
223 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); 258 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
224 259
225 if (ret || (rmrru->devices_cnt == 0)) 260 if (ret || (rmrru->devices_cnt == 0)) {
261 list_del(&rmrru->list);
226 kfree(rmrru); 262 kfree(rmrru);
227 else 263 }
228 dmar_register_rmrr_unit(rmrru);
229 return ret; 264 return ret;
230} 265}
266#endif
231 267
232static void __init 268static void __init
233dmar_table_print_dmar_entry(struct acpi_dmar_header *header) 269dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
@@ -240,19 +276,39 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
240 drhd = (struct acpi_dmar_hardware_unit *)header; 276 drhd = (struct acpi_dmar_hardware_unit *)header;
241 printk (KERN_INFO PREFIX 277 printk (KERN_INFO PREFIX
242 "DRHD (flags: 0x%08x)base: 0x%016Lx\n", 278 "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
243 drhd->flags, drhd->address); 279 drhd->flags, (unsigned long long)drhd->address);
244 break; 280 break;
245 case ACPI_DMAR_TYPE_RESERVED_MEMORY: 281 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
246 rmrr = (struct acpi_dmar_reserved_memory *)header; 282 rmrr = (struct acpi_dmar_reserved_memory *)header;
247 283
248 printk (KERN_INFO PREFIX 284 printk (KERN_INFO PREFIX
249 "RMRR base: 0x%016Lx end: 0x%016Lx\n", 285 "RMRR base: 0x%016Lx end: 0x%016Lx\n",
250 rmrr->base_address, rmrr->end_address); 286 (unsigned long long)rmrr->base_address,
287 (unsigned long long)rmrr->end_address);
251 break; 288 break;
252 } 289 }
253} 290}
254 291
255/** 292/**
293 * dmar_table_detect - checks to see if the platform supports DMAR devices
294 */
295static int __init dmar_table_detect(void)
296{
297 acpi_status status = AE_OK;
298
299 /* if we could find DMAR table, then there are DMAR devices */
300 status = acpi_get_table(ACPI_SIG_DMAR, 0,
301 (struct acpi_table_header **)&dmar_tbl);
302
303 if (ACPI_SUCCESS(status) && !dmar_tbl) {
304 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
305 status = AE_NOT_FOUND;
306 }
307
308 return (ACPI_SUCCESS(status) ? 1 : 0);
309}
310
311/**
256 * parse_dmar_table - parses the DMA reporting table 312 * parse_dmar_table - parses the DMA reporting table
257 */ 313 */
258static int __init 314static int __init
@@ -262,11 +318,17 @@ parse_dmar_table(void)
262 struct acpi_dmar_header *entry_header; 318 struct acpi_dmar_header *entry_header;
263 int ret = 0; 319 int ret = 0;
264 320
321 /*
322 * Do it again, earlier dmar_tbl mapping could be mapped with
323 * fixed map.
324 */
325 dmar_table_detect();
326
265 dmar = (struct acpi_table_dmar *)dmar_tbl; 327 dmar = (struct acpi_table_dmar *)dmar_tbl;
266 if (!dmar) 328 if (!dmar)
267 return -ENODEV; 329 return -ENODEV;
268 330
269 if (dmar->width < PAGE_SHIFT_4K - 1) { 331 if (dmar->width < PAGE_SHIFT - 1) {
270 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); 332 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
271 return -EINVAL; 333 return -EINVAL;
272 } 334 }
@@ -284,7 +346,9 @@ parse_dmar_table(void)
284 ret = dmar_parse_one_drhd(entry_header); 346 ret = dmar_parse_one_drhd(entry_header);
285 break; 347 break;
286 case ACPI_DMAR_TYPE_RESERVED_MEMORY: 348 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
349#ifdef CONFIG_DMAR
287 ret = dmar_parse_one_rmrr(entry_header); 350 ret = dmar_parse_one_rmrr(entry_header);
351#endif
288 break; 352 break;
289 default: 353 default:
290 printk(KERN_WARNING PREFIX 354 printk(KERN_WARNING PREFIX
@@ -300,15 +364,77 @@ parse_dmar_table(void)
300 return ret; 364 return ret;
301} 365}
302 366
367int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
368 struct pci_dev *dev)
369{
370 int index;
303 371
304int __init dmar_table_init(void) 372 while (dev) {
373 for (index = 0; index < cnt; index++)
374 if (dev == devices[index])
375 return 1;
376
377 /* Check our parent */
378 dev = dev->bus->self;
379 }
380
381 return 0;
382}
383
384struct dmar_drhd_unit *
385dmar_find_matched_drhd_unit(struct pci_dev *dev)
305{ 386{
387 struct dmar_drhd_unit *drhd = NULL;
388
389 list_for_each_entry(drhd, &dmar_drhd_units, list) {
390 if (drhd->include_all || dmar_pci_device_match(drhd->devices,
391 drhd->devices_cnt, dev))
392 return drhd;
393 }
394
395 return NULL;
396}
397
398int __init dmar_dev_scope_init(void)
399{
400 struct dmar_drhd_unit *drhd, *drhd_n;
401 int ret = -ENODEV;
402
403 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
404 ret = dmar_parse_dev(drhd);
405 if (ret)
406 return ret;
407 }
408
409#ifdef CONFIG_DMAR
410 {
411 struct dmar_rmrr_unit *rmrr, *rmrr_n;
412 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
413 ret = rmrr_parse_dev(rmrr);
414 if (ret)
415 return ret;
416 }
417 }
418#endif
419
420 return ret;
421}
422
306 423
424int __init dmar_table_init(void)
425{
426 static int dmar_table_initialized;
307 int ret; 427 int ret;
308 428
429 if (dmar_table_initialized)
430 return 0;
431
432 dmar_table_initialized = 1;
433
309 ret = parse_dmar_table(); 434 ret = parse_dmar_table();
310 if (ret) { 435 if (ret) {
311 printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); 436 if (ret != -ENODEV)
437 printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
312 return ret; 438 return ret;
313 } 439 }
314 440
@@ -317,29 +443,320 @@ int __init dmar_table_init(void)
317 return -ENODEV; 443 return -ENODEV;
318 } 444 }
319 445
320 if (list_empty(&dmar_rmrr_units)) { 446#ifdef CONFIG_DMAR
447 if (list_empty(&dmar_rmrr_units))
321 printk(KERN_INFO PREFIX "No RMRR found\n"); 448 printk(KERN_INFO PREFIX "No RMRR found\n");
322 return -ENODEV; 449#endif
450
451#ifdef CONFIG_INTR_REMAP
452 parse_ioapics_under_ir();
453#endif
454 return 0;
455}
456
457void __init detect_intel_iommu(void)
458{
459 int ret;
460
461 ret = dmar_table_detect();
462
463 {
464#ifdef CONFIG_INTR_REMAP
465 struct acpi_table_dmar *dmar;
466 /*
467 * for now we will disable dma-remapping when interrupt
468 * remapping is enabled.
469 * When support for queued invalidation for IOTLB invalidation
470 * is added, we will not need this any more.
471 */
472 dmar = (struct acpi_table_dmar *) dmar_tbl;
473 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
474 printk(KERN_INFO
475 "Queued invalidation will be enabled to support "
476 "x2apic and Intr-remapping.\n");
477#endif
478#ifdef CONFIG_DMAR
479 if (ret && !no_iommu && !iommu_detected && !swiotlb &&
480 !dmar_disabled)
481 iommu_detected = 1;
482#endif
483 }
484 dmar_tbl = NULL;
485}
486
487
488int alloc_iommu(struct dmar_drhd_unit *drhd)
489{
490 struct intel_iommu *iommu;
491 int map_size;
492 u32 ver;
493 static int iommu_allocated = 0;
494
495 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
496 if (!iommu)
497 return -ENOMEM;
498
499 iommu->seq_id = iommu_allocated++;
500
501 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
502 if (!iommu->reg) {
503 printk(KERN_ERR "IOMMU: can't map the region\n");
504 goto error;
323 } 505 }
506 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
507 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
508
509 /* the registers might be more than one page */
510 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
511 cap_max_fault_reg_offset(iommu->cap));
512 map_size = VTD_PAGE_ALIGN(map_size);
513 if (map_size > VTD_PAGE_SIZE) {
514 iounmap(iommu->reg);
515 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
516 if (!iommu->reg) {
517 printk(KERN_ERR "IOMMU: can't map the region\n");
518 goto error;
519 }
520 }
521
522 ver = readl(iommu->reg + DMAR_VER_REG);
523 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
524 (unsigned long long)drhd->reg_base_addr,
525 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
526 (unsigned long long)iommu->cap,
527 (unsigned long long)iommu->ecap);
528
529 spin_lock_init(&iommu->register_lock);
324 530
531 drhd->iommu = iommu;
325 return 0; 532 return 0;
533error:
534 kfree(iommu);
535 return -1;
326} 536}
327 537
328/** 538void free_iommu(struct intel_iommu *iommu)
329 * early_dmar_detect - checks to see if the platform supports DMAR devices 539{
540 if (!iommu)
541 return;
542
543#ifdef CONFIG_DMAR
544 free_dmar_iommu(iommu);
545#endif
546
547 if (iommu->reg)
548 iounmap(iommu->reg);
549 kfree(iommu);
550}
551
552/*
553 * Reclaim all the submitted descriptors which have completed its work.
330 */ 554 */
331int __init early_dmar_detect(void) 555static inline void reclaim_free_desc(struct q_inval *qi)
332{ 556{
333 acpi_status status = AE_OK; 557 while (qi->desc_status[qi->free_tail] == QI_DONE) {
558 qi->desc_status[qi->free_tail] = QI_FREE;
559 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
560 qi->free_cnt++;
561 }
562}
334 563
335 /* if we could find DMAR table, then there are DMAR devices */ 564/*
336 status = acpi_get_table(ACPI_SIG_DMAR, 0, 565 * Submit the queued invalidation descriptor to the remapping
337 (struct acpi_table_header **)&dmar_tbl); 566 * hardware unit and wait for its completion.
567 */
568void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
569{
570 struct q_inval *qi = iommu->qi;
571 struct qi_desc *hw, wait_desc;
572 int wait_index, index;
573 unsigned long flags;
338 574
339 if (ACPI_SUCCESS(status) && !dmar_tbl) { 575 if (!qi)
340 printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); 576 return;
341 status = AE_NOT_FOUND; 577
578 hw = qi->desc;
579
580 spin_lock_irqsave(&qi->q_lock, flags);
581 while (qi->free_cnt < 3) {
582 spin_unlock_irqrestore(&qi->q_lock, flags);
583 cpu_relax();
584 spin_lock_irqsave(&qi->q_lock, flags);
342 } 585 }
343 586
344 return (ACPI_SUCCESS(status) ? 1 : 0); 587 index = qi->free_head;
588 wait_index = (index + 1) % QI_LENGTH;
589
590 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
591
592 hw[index] = *desc;
593
594 wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
595 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
596
597 hw[wait_index] = wait_desc;
598
599 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
600 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
601
602 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
603 qi->free_cnt -= 2;
604
605 spin_lock(&iommu->register_lock);
606 /*
607 * update the HW tail register indicating the presence of
608 * new descriptors.
609 */
610 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
611 spin_unlock(&iommu->register_lock);
612
613 while (qi->desc_status[wait_index] != QI_DONE) {
614 /*
615 * We will leave the interrupts disabled, to prevent interrupt
616 * context to queue another cmd while a cmd is already submitted
617 * and waiting for completion on this cpu. This is to avoid
618 * a deadlock where the interrupt context can wait indefinitely
619 * for free slots in the queue.
620 */
621 spin_unlock(&qi->q_lock);
622 cpu_relax();
623 spin_lock(&qi->q_lock);
624 }
625
626 qi->desc_status[index] = QI_DONE;
627
628 reclaim_free_desc(qi);
629 spin_unlock_irqrestore(&qi->q_lock, flags);
630}
631
632/*
633 * Flush the global interrupt entry cache.
634 */
635void qi_global_iec(struct intel_iommu *iommu)
636{
637 struct qi_desc desc;
638
639 desc.low = QI_IEC_TYPE;
640 desc.high = 0;
641
642 qi_submit_sync(&desc, iommu);
643}
644
645int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
646 u64 type, int non_present_entry_flush)
647{
648
649 struct qi_desc desc;
650
651 if (non_present_entry_flush) {
652 if (!cap_caching_mode(iommu->cap))
653 return 1;
654 else
655 did = 0;
656 }
657
658 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
659 | QI_CC_GRAN(type) | QI_CC_TYPE;
660 desc.high = 0;
661
662 qi_submit_sync(&desc, iommu);
663
664 return 0;
665
666}
667
668int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
669 unsigned int size_order, u64 type,
670 int non_present_entry_flush)
671{
672 u8 dw = 0, dr = 0;
673
674 struct qi_desc desc;
675 int ih = 0;
676
677 if (non_present_entry_flush) {
678 if (!cap_caching_mode(iommu->cap))
679 return 1;
680 else
681 did = 0;
682 }
683
684 if (cap_write_drain(iommu->cap))
685 dw = 1;
686
687 if (cap_read_drain(iommu->cap))
688 dr = 1;
689
690 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
691 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
692 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
693 | QI_IOTLB_AM(size_order);
694
695 qi_submit_sync(&desc, iommu);
696
697 return 0;
698
699}
700
701/*
702 * Enable Queued Invalidation interface. This is a must to support
703 * interrupt-remapping. Also used by DMA-remapping, which replaces
704 * register based IOTLB invalidation.
705 */
706int dmar_enable_qi(struct intel_iommu *iommu)
707{
708 u32 cmd, sts;
709 unsigned long flags;
710 struct q_inval *qi;
711
712 if (!ecap_qis(iommu->ecap))
713 return -ENOENT;
714
715 /*
716 * queued invalidation is already setup and enabled.
717 */
718 if (iommu->qi)
719 return 0;
720
721 iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
722 if (!iommu->qi)
723 return -ENOMEM;
724
725 qi = iommu->qi;
726
727 qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
728 if (!qi->desc) {
729 kfree(qi);
730 iommu->qi = 0;
731 return -ENOMEM;
732 }
733
734 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
735 if (!qi->desc_status) {
736 free_page((unsigned long) qi->desc);
737 kfree(qi);
738 iommu->qi = 0;
739 return -ENOMEM;
740 }
741
742 qi->free_head = qi->free_tail = 0;
743 qi->free_cnt = QI_LENGTH;
744
745 spin_lock_init(&qi->q_lock);
746
747 spin_lock_irqsave(&iommu->register_lock, flags);
748 /* write zero to the tail reg */
749 writel(0, iommu->reg + DMAR_IQT_REG);
750
751 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
752
753 cmd = iommu->gcmd | DMA_GCMD_QIE;
754 iommu->gcmd |= DMA_GCMD_QIE;
755 writel(cmd, iommu->reg + DMAR_GCMD_REG);
756
757 /* Make sure hardware complete it */
758 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
759 spin_unlock_irqrestore(&iommu->register_lock, flags);
760
761 return 0;
345} 762}
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 93e37f0666ab..e17ef54f0efc 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
382int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) 382int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
383{ 383{
384 acpi_status status; 384 acpi_status status;
385 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); 385 acpi_handle chandle, handle;
386 struct pci_dev *pdev = dev; 386 struct pci_dev *pdev = dev;
387 struct pci_bus *parent; 387 struct pci_bus *parent;
388 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 388 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -399,10 +399,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
399 * Per PCI firmware specification, we should run the ACPI _OSC 399 * Per PCI firmware specification, we should run the ACPI _OSC
400 * method to get control of hotplug hardware before using it. If 400 * method to get control of hotplug hardware before using it. If
401 * an _OSC is missing, we look for an OSHP to do the same thing. 401 * an _OSC is missing, we look for an OSHP to do the same thing.
402 * To handle different BIOS behavior, we look for _OSC and OSHP 402 * To handle different BIOS behavior, we look for _OSC on a root
403 * within the scope of the hotplug controller and its parents, 403 * bridge preferentially (according to PCI fw spec). Later for
404 * OSHP within the scope of the hotplug controller and its parents,
404 * upto the host bridge under which this controller exists. 405 * upto the host bridge under which this controller exists.
405 */ 406 */
407 handle = acpi_find_root_bridge_handle(pdev);
408 if (handle) {
409 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
410 dbg("Trying to get hotplug control for %s\n",
411 (char *)string.pointer);
412 status = pci_osc_control_set(handle, flags);
413 if (ACPI_SUCCESS(status))
414 goto got_one;
415 kfree(string.pointer);
416 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
417 }
418
419 pdev = dev;
420 handle = DEVICE_ACPI_HANDLE(&dev->dev);
406 while (!handle) { 421 while (!handle) {
407 /* 422 /*
408 * This hotplug controller was not listed in the ACPI name 423 * This hotplug controller was not listed in the ACPI name
@@ -427,15 +442,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
427 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 442 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
428 dbg("Trying to get hotplug control for %s \n", 443 dbg("Trying to get hotplug control for %s \n",
429 (char *)string.pointer); 444 (char *)string.pointer);
430 status = pci_osc_control_set(handle, flags); 445 status = acpi_run_oshp(handle);
431 if (status == AE_NOT_FOUND) 446 if (ACPI_SUCCESS(status))
432 status = acpi_run_oshp(handle); 447 goto got_one;
433 if (ACPI_SUCCESS(status)) {
434 dbg("Gained control for hotplug HW for pci %s (%s)\n",
435 pci_name(dev), (char *)string.pointer);
436 kfree(string.pointer);
437 return 0;
438 }
439 if (acpi_root_bridge(handle)) 448 if (acpi_root_bridge(handle))
440 break; 449 break;
441 chandle = handle; 450 chandle = handle;
@@ -449,6 +458,11 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
449 458
450 kfree(string.pointer); 459 kfree(string.pointer);
451 return -ENODEV; 460 return -ENODEV;
461got_one:
462 dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev),
463 (char *)string.pointer);
464 kfree(string.pointer);
465 return 0;
452} 466}
453EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); 467EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
454 468
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index eecf7cbf4139..f9e244da30ae 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -36,7 +36,7 @@
36#define _ACPIPHP_H 36#define _ACPIPHP_H
37 37
38#include <linux/acpi.h> 38#include <linux/acpi.h>
39#include <linux/kobject.h> /* for KOBJ_NAME_LEN */ 39#include <linux/kobject.h>
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/pci_hotplug.h> 41#include <linux/pci_hotplug.h>
42 42
@@ -50,9 +50,6 @@
50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
52 52
53/* name size which is used for entries in pcihpfs */
54#define SLOT_NAME_SIZE KOBJ_NAME_LEN /* {_SUN} */
55
56struct acpiphp_bridge; 53struct acpiphp_bridge;
57struct acpiphp_slot; 54struct acpiphp_slot;
58 55
@@ -63,9 +60,13 @@ struct slot {
63 struct hotplug_slot *hotplug_slot; 60 struct hotplug_slot *hotplug_slot;
64 struct acpiphp_slot *acpi_slot; 61 struct acpiphp_slot *acpi_slot;
65 struct hotplug_slot_info info; 62 struct hotplug_slot_info info;
66 char name[SLOT_NAME_SIZE];
67}; 63};
68 64
65static inline const char *slot_name(struct slot *slot)
66{
67 return hotplug_slot_name(slot->hotplug_slot);
68}
69
69/* 70/*
70 * struct acpiphp_bridge - PCI bridge information 71 * struct acpiphp_bridge - PCI bridge information
71 * 72 *
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 0e496e866a84..95b536a23d25 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -44,6 +44,9 @@
44 44
45#define MY_NAME "acpiphp" 45#define MY_NAME "acpiphp"
46 46
47/* name size which is used for entries in pcihpfs */
48#define SLOT_NAME_SIZE 21 /* {_SUN} */
49
47static int debug; 50static int debug;
48int acpiphp_debug; 51int acpiphp_debug;
49 52
@@ -84,7 +87,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
84 .get_adapter_status = get_adapter_status, 87 .get_adapter_status = get_adapter_status,
85}; 88};
86 89
87
88/** 90/**
89 * acpiphp_register_attention - set attention LED callback 91 * acpiphp_register_attention - set attention LED callback
90 * @info: must be completely filled with LED callbacks 92 * @info: must be completely filled with LED callbacks
@@ -136,7 +138,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
136{ 138{
137 struct slot *slot = hotplug_slot->private; 139 struct slot *slot = hotplug_slot->private;
138 140
139 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 141 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
140 142
141 /* enable the specified slot */ 143 /* enable the specified slot */
142 return acpiphp_enable_slot(slot->acpi_slot); 144 return acpiphp_enable_slot(slot->acpi_slot);
@@ -154,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
154 struct slot *slot = hotplug_slot->private; 156 struct slot *slot = hotplug_slot->private;
155 int retval; 157 int retval;
156 158
157 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 159 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
158 160
159 /* disable the specified slot */ 161 /* disable the specified slot */
160 retval = acpiphp_disable_slot(slot->acpi_slot); 162 retval = acpiphp_disable_slot(slot->acpi_slot);
@@ -177,7 +179,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
177 { 179 {
178 int retval = -ENODEV; 180 int retval = -ENODEV;
179 181
180 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 182 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
181 183
182 if (attention_info && try_module_get(attention_info->owner)) { 184 if (attention_info && try_module_get(attention_info->owner)) {
183 retval = attention_info->set_attn(hotplug_slot, status); 185 retval = attention_info->set_attn(hotplug_slot, status);
@@ -200,7 +202,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
200{ 202{
201 struct slot *slot = hotplug_slot->private; 203 struct slot *slot = hotplug_slot->private;
202 204
203 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 205 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
204 206
205 *value = acpiphp_get_power_status(slot->acpi_slot); 207 *value = acpiphp_get_power_status(slot->acpi_slot);
206 208
@@ -222,7 +224,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
222{ 224{
223 int retval = -EINVAL; 225 int retval = -EINVAL;
224 226
225 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 227 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
226 228
227 if (attention_info && try_module_get(attention_info->owner)) { 229 if (attention_info && try_module_get(attention_info->owner)) {
228 retval = attention_info->get_attn(hotplug_slot, value); 230 retval = attention_info->get_attn(hotplug_slot, value);
@@ -245,7 +247,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
245{ 247{
246 struct slot *slot = hotplug_slot->private; 248 struct slot *slot = hotplug_slot->private;
247 249
248 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 250 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
249 251
250 *value = acpiphp_get_latch_status(slot->acpi_slot); 252 *value = acpiphp_get_latch_status(slot->acpi_slot);
251 253
@@ -265,7 +267,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
265{ 267{
266 struct slot *slot = hotplug_slot->private; 268 struct slot *slot = hotplug_slot->private;
267 269
268 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 270 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
269 271
270 *value = acpiphp_get_adapter_status(slot->acpi_slot); 272 *value = acpiphp_get_adapter_status(slot->acpi_slot);
271 273
@@ -299,7 +301,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
299{ 301{
300 struct slot *slot = hotplug_slot->private; 302 struct slot *slot = hotplug_slot->private;
301 303
302 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 304 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
303 305
304 kfree(slot->hotplug_slot); 306 kfree(slot->hotplug_slot);
305 kfree(slot); 307 kfree(slot);
@@ -310,6 +312,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
310{ 312{
311 struct slot *slot; 313 struct slot *slot;
312 int retval = -ENOMEM; 314 int retval = -ENOMEM;
315 char name[SLOT_NAME_SIZE];
313 316
314 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 317 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
315 if (!slot) 318 if (!slot)
@@ -321,8 +324,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
321 324
322 slot->hotplug_slot->info = &slot->info; 325 slot->hotplug_slot->info = &slot->info;
323 326
324 slot->hotplug_slot->name = slot->name;
325
326 slot->hotplug_slot->private = slot; 327 slot->hotplug_slot->private = slot;
327 slot->hotplug_slot->release = &release_slot; 328 slot->hotplug_slot->release = &release_slot;
328 slot->hotplug_slot->ops = &acpi_hotplug_slot_ops; 329 slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
@@ -336,11 +337,12 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
336 slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; 337 slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
337 338
338 acpiphp_slot->slot = slot; 339 acpiphp_slot->slot = slot;
339 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); 340 snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
340 341
341 retval = pci_hp_register(slot->hotplug_slot, 342 retval = pci_hp_register(slot->hotplug_slot,
342 acpiphp_slot->bridge->pci_bus, 343 acpiphp_slot->bridge->pci_bus,
343 acpiphp_slot->device); 344 acpiphp_slot->device,
345 name);
344 if (retval == -EBUSY) 346 if (retval == -EBUSY)
345 goto error_hpslot; 347 goto error_hpslot;
346 if (retval) { 348 if (retval) {
@@ -348,7 +350,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
348 goto error_hpslot; 350 goto error_hpslot;
349 } 351 }
350 352
351 info("Slot [%s] registered\n", slot->hotplug_slot->name); 353 info("Slot [%s] registered\n", slot_name(slot));
352 354
353 return 0; 355 return 0;
354error_hpslot: 356error_hpslot:
@@ -365,7 +367,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
365 struct slot *slot = acpiphp_slot->slot; 367 struct slot *slot = acpiphp_slot->slot;
366 int retval = 0; 368 int retval = 0;
367 369
368 info ("Slot [%s] unregistered\n", slot->hotplug_slot->name); 370 info("Slot [%s] unregistered\n", slot_name(slot));
369 371
370 retval = pci_hp_deregister(slot->hotplug_slot); 372 retval = pci_hp_deregister(slot->hotplug_slot);
371 if (retval) 373 if (retval)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a3e4705dd8f0..955aae4071f7 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -169,7 +169,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
169} 169}
170 170
171 171
172 172static struct acpi_dock_ops acpiphp_dock_ops = {
173 .handler = handle_hotplug_event_func,
174};
173 175
174/* callback routine to register each ACPI PCI slot object */ 176/* callback routine to register each ACPI PCI slot object */
175static acpi_status 177static acpi_status
@@ -180,7 +182,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
180 struct acpiphp_func *newfunc; 182 struct acpiphp_func *newfunc;
181 acpi_handle tmp; 183 acpi_handle tmp;
182 acpi_status status = AE_OK; 184 acpi_status status = AE_OK;
183 unsigned long adr, sun; 185 unsigned long long adr, sun;
184 int device, function, retval; 186 int device, function, retval;
185 187
186 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 188 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
@@ -285,7 +287,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
285 */ 287 */
286 newfunc->flags &= ~FUNC_HAS_EJ0; 288 newfunc->flags &= ~FUNC_HAS_EJ0;
287 if (register_hotplug_dock_device(handle, 289 if (register_hotplug_dock_device(handle,
288 handle_hotplug_event_func, newfunc)) 290 &acpiphp_dock_ops, newfunc))
289 dbg("failed to register dock device\n"); 291 dbg("failed to register dock device\n");
290 292
291 /* we need to be notified when dock events happen 293 /* we need to be notified when dock events happen
@@ -528,7 +530,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
528{ 530{
529 acpi_status status; 531 acpi_status status;
530 acpi_handle dummy_handle; 532 acpi_handle dummy_handle;
531 unsigned long tmp; 533 unsigned long long tmp;
532 int device, function; 534 int device, function;
533 struct pci_dev *dev; 535 struct pci_dev *dev;
534 struct pci_bus *pci_bus = context; 536 struct pci_bus *pci_bus = context;
@@ -573,7 +575,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
573static int add_bridge(acpi_handle handle) 575static int add_bridge(acpi_handle handle)
574{ 576{
575 acpi_status status; 577 acpi_status status;
576 unsigned long tmp; 578 unsigned long long tmp;
577 int seg, bus; 579 int seg, bus;
578 acpi_handle dummy_handle; 580 acpi_handle dummy_handle;
579 struct pci_bus *pci_bus; 581 struct pci_bus *pci_bus;
@@ -767,7 +769,7 @@ static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
767{ 769{
768 acpi_status status; 770 acpi_status status;
769 int result = -1; 771 int result = -1;
770 unsigned long gsb; 772 unsigned long long gsb;
771 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 773 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
772 union acpi_object *obj; 774 union acpi_object *obj;
773 void *table; 775 void *table;
@@ -808,7 +810,7 @@ static acpi_status
808ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv) 810ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
809{ 811{
810 acpi_status status; 812 acpi_status status;
811 unsigned long sta; 813 unsigned long long sta;
812 acpi_handle tmp; 814 acpi_handle tmp;
813 struct pci_dev *pdev; 815 struct pci_dev *pdev;
814 u32 gsi_base; 816 u32 gsi_base;
@@ -872,7 +874,7 @@ static acpi_status
872ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv) 874ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv)
873{ 875{
874 acpi_status status; 876 acpi_status status;
875 unsigned long sta; 877 unsigned long long sta;
876 acpi_handle tmp; 878 acpi_handle tmp;
877 u32 gsi_base; 879 u32 gsi_base;
878 struct acpiphp_ioapic *pos, *n, *ioapic = NULL; 880 struct acpiphp_ioapic *pos, *n, *ioapic = NULL;
@@ -1264,7 +1266,7 @@ static int disable_device(struct acpiphp_slot *slot)
1264static unsigned int get_slot_status(struct acpiphp_slot *slot) 1266static unsigned int get_slot_status(struct acpiphp_slot *slot)
1265{ 1267{
1266 acpi_status status; 1268 acpi_status status;
1267 unsigned long sta = 0; 1269 unsigned long long sta = 0;
1268 u32 dvid; 1270 u32 dvid;
1269 struct list_head *l; 1271 struct list_head *l;
1270 struct acpiphp_func *func; 1272 struct acpiphp_func *func;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2b7c45e39370..881fdd2b7313 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -183,7 +183,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
183 union acpi_object args[2]; 183 union acpi_object args[2];
184 struct acpi_object_list params = { .pointer = args, .count = 2 }; 184 struct acpi_object_list params = { .pointer = args, .count = 2 };
185 acpi_status stat; 185 acpi_status stat;
186 unsigned long rc; 186 unsigned long long rc;
187 union apci_descriptor *ibm_slot; 187 union apci_descriptor *ibm_slot;
188 188
189 ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); 189 ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
@@ -204,7 +204,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
204 err("APLS evaluation failed: 0x%08x\n", stat); 204 err("APLS evaluation failed: 0x%08x\n", stat);
205 return -ENODEV; 205 return -ENODEV;
206 } else if (!rc) { 206 } else if (!rc) {
207 err("APLS method failed: 0x%08lx\n", rc); 207 err("APLS method failed: 0x%08llx\n", rc);
208 return -ERANGE; 208 return -ERANGE;
209 } 209 }
210 return 0; 210 return 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index d9769b30be9a..9fff878cf026 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -30,6 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/pci_hotplug.h>
33 34
34/* PICMG 2.1 R2.0 HS CSR bits: */ 35/* PICMG 2.1 R2.0 HS CSR bits: */
35#define HS_CSR_INS 0x0080 36#define HS_CSR_INS 0x0080
@@ -69,6 +70,11 @@ struct cpci_hp_controller {
69 struct cpci_hp_controller_ops *ops; 70 struct cpci_hp_controller_ops *ops;
70}; 71};
71 72
73static inline const char *slot_name(struct slot *slot)
74{
75 return hotplug_slot_name(slot->hotplug_slot);
76}
77
72extern int cpci_hp_register_controller(struct cpci_hp_controller *controller); 78extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
73extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller); 79extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
74extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last); 80extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 935947991dc9..de94f4feef8c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -108,7 +108,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
108 struct slot *slot = hotplug_slot->private; 108 struct slot *slot = hotplug_slot->private;
109 int retval = 0; 109 int retval = 0;
110 110
111 dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); 111 dbg("%s - physical_slot = %s", __func__, slot_name(slot));
112 112
113 if (controller->ops->set_power) 113 if (controller->ops->set_power)
114 retval = controller->ops->set_power(slot, 1); 114 retval = controller->ops->set_power(slot, 1);
@@ -121,25 +121,23 @@ disable_slot(struct hotplug_slot *hotplug_slot)
121 struct slot *slot = hotplug_slot->private; 121 struct slot *slot = hotplug_slot->private;
122 int retval = 0; 122 int retval = 0;
123 123
124 dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); 124 dbg("%s - physical_slot = %s", __func__, slot_name(slot));
125 125
126 down_write(&list_rwsem); 126 down_write(&list_rwsem);
127 127
128 /* Unconfigure device */ 128 /* Unconfigure device */
129 dbg("%s - unconfiguring slot %s", 129 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
130 __func__, slot->hotplug_slot->name);
131 if ((retval = cpci_unconfigure_slot(slot))) { 130 if ((retval = cpci_unconfigure_slot(slot))) {
132 err("%s - could not unconfigure slot %s", 131 err("%s - could not unconfigure slot %s",
133 __func__, slot->hotplug_slot->name); 132 __func__, slot_name(slot));
134 goto disable_error; 133 goto disable_error;
135 } 134 }
136 dbg("%s - finished unconfiguring slot %s", 135 dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot));
137 __func__, slot->hotplug_slot->name);
138 136
139 /* Clear EXT (by setting it) */ 137 /* Clear EXT (by setting it) */
140 if (cpci_clear_ext(slot)) { 138 if (cpci_clear_ext(slot)) {
141 err("%s - could not clear EXT for slot %s", 139 err("%s - could not clear EXT for slot %s",
142 __func__, slot->hotplug_slot->name); 140 __func__, slot_name(slot));
143 retval = -ENODEV; 141 retval = -ENODEV;
144 goto disable_error; 142 goto disable_error;
145 } 143 }
@@ -214,7 +212,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
214 struct slot *slot = hotplug_slot->private; 212 struct slot *slot = hotplug_slot->private;
215 213
216 kfree(slot->hotplug_slot->info); 214 kfree(slot->hotplug_slot->info);
217 kfree(slot->hotplug_slot->name);
218 kfree(slot->hotplug_slot); 215 kfree(slot->hotplug_slot);
219 if (slot->dev) 216 if (slot->dev)
220 pci_dev_put(slot->dev); 217 pci_dev_put(slot->dev);
@@ -222,12 +219,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
222} 219}
223 220
224#define SLOT_NAME_SIZE 6 221#define SLOT_NAME_SIZE 6
225static void
226make_slot_name(struct slot *slot)
227{
228 snprintf(slot->hotplug_slot->name,
229 SLOT_NAME_SIZE, "%02x:%02x", slot->bus->number, slot->number);
230}
231 222
232int 223int
233cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) 224cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
@@ -235,7 +226,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
235 struct slot *slot; 226 struct slot *slot;
236 struct hotplug_slot *hotplug_slot; 227 struct hotplug_slot *hotplug_slot;
237 struct hotplug_slot_info *info; 228 struct hotplug_slot_info *info;
238 char *name; 229 char name[SLOT_NAME_SIZE];
239 int status = -ENOMEM; 230 int status = -ENOMEM;
240 int i; 231 int i;
241 232
@@ -262,34 +253,31 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
262 goto error_hpslot; 253 goto error_hpslot;
263 hotplug_slot->info = info; 254 hotplug_slot->info = info;
264 255
265 name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
266 if (!name)
267 goto error_info;
268 hotplug_slot->name = name;
269
270 slot->bus = bus; 256 slot->bus = bus;
271 slot->number = i; 257 slot->number = i;
272 slot->devfn = PCI_DEVFN(i, 0); 258 slot->devfn = PCI_DEVFN(i, 0);
273 259
260 snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
261
274 hotplug_slot->private = slot; 262 hotplug_slot->private = slot;
275 hotplug_slot->release = &release_slot; 263 hotplug_slot->release = &release_slot;
276 make_slot_name(slot);
277 hotplug_slot->ops = &cpci_hotplug_slot_ops; 264 hotplug_slot->ops = &cpci_hotplug_slot_ops;
278 265
279 /* 266 /*
280 * Initialize the slot info structure with some known 267 * Initialize the slot info structure with some known
281 * good values. 268 * good values.
282 */ 269 */
283 dbg("initializing slot %s", slot->hotplug_slot->name); 270 dbg("initializing slot %s", name);
284 info->power_status = cpci_get_power_status(slot); 271 info->power_status = cpci_get_power_status(slot);
285 info->attention_status = cpci_get_attention_status(slot); 272 info->attention_status = cpci_get_attention_status(slot);
286 273
287 dbg("registering slot %s", slot->hotplug_slot->name); 274 dbg("registering slot %s", name);
288 status = pci_hp_register(slot->hotplug_slot, bus, i); 275 status = pci_hp_register(slot->hotplug_slot, bus, i, name);
289 if (status) { 276 if (status) {
290 err("pci_hp_register failed with error %d", status); 277 err("pci_hp_register failed with error %d", status);
291 goto error_name; 278 goto error_info;
292 } 279 }
280 dbg("slot registered with name: %s", slot_name(slot));
293 281
294 /* Add slot to our internal list */ 282 /* Add slot to our internal list */
295 down_write(&list_rwsem); 283 down_write(&list_rwsem);
@@ -298,8 +286,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
298 up_write(&list_rwsem); 286 up_write(&list_rwsem);
299 } 287 }
300 return 0; 288 return 0;
301error_name:
302 kfree(name);
303error_info: 289error_info:
304 kfree(info); 290 kfree(info);
305error_hpslot: 291error_hpslot:
@@ -327,7 +313,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
327 list_del(&slot->slot_list); 313 list_del(&slot->slot_list);
328 slots--; 314 slots--;
329 315
330 dbg("deregistering slot %s", slot->hotplug_slot->name); 316 dbg("deregistering slot %s", slot_name(slot));
331 status = pci_hp_deregister(slot->hotplug_slot); 317 status = pci_hp_deregister(slot->hotplug_slot);
332 if (status) { 318 if (status) {
333 err("pci_hp_deregister failed with error %d", 319 err("pci_hp_deregister failed with error %d",
@@ -379,11 +365,10 @@ init_slots(int clear_ins)
379 return -1; 365 return -1;
380 } 366 }
381 list_for_each_entry(slot, &slot_list, slot_list) { 367 list_for_each_entry(slot, &slot_list, slot_list) {
382 dbg("%s - looking at slot %s", 368 dbg("%s - looking at slot %s", __func__, slot_name(slot));
383 __func__, slot->hotplug_slot->name);
384 if (clear_ins && cpci_check_and_clear_ins(slot)) 369 if (clear_ins && cpci_check_and_clear_ins(slot))
385 dbg("%s - cleared INS for slot %s", 370 dbg("%s - cleared INS for slot %s",
386 __func__, slot->hotplug_slot->name); 371 __func__, slot_name(slot));
387 dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0)); 372 dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
388 if (dev) { 373 if (dev) {
389 if (update_adapter_status(slot->hotplug_slot, 1)) 374 if (update_adapter_status(slot->hotplug_slot, 1))
@@ -414,8 +399,7 @@ check_slots(void)
414 } 399 }
415 extracted = inserted = 0; 400 extracted = inserted = 0;
416 list_for_each_entry(slot, &slot_list, slot_list) { 401 list_for_each_entry(slot, &slot_list, slot_list) {
417 dbg("%s - looking at slot %s", 402 dbg("%s - looking at slot %s", __func__, slot_name(slot));
418 __func__, slot->hotplug_slot->name);
419 if (cpci_check_and_clear_ins(slot)) { 403 if (cpci_check_and_clear_ins(slot)) {
420 /* 404 /*
421 * Some broken hardware (e.g. PLX 9054AB) asserts 405 * Some broken hardware (e.g. PLX 9054AB) asserts
@@ -423,35 +407,34 @@ check_slots(void)
423 */ 407 */
424 if (slot->dev) { 408 if (slot->dev) {
425 warn("slot %s already inserted", 409 warn("slot %s already inserted",
426 slot->hotplug_slot->name); 410 slot_name(slot));
427 inserted++; 411 inserted++;
428 continue; 412 continue;
429 } 413 }
430 414
431 /* Process insertion */ 415 /* Process insertion */
432 dbg("%s - slot %s inserted", 416 dbg("%s - slot %s inserted", __func__, slot_name(slot));
433 __func__, slot->hotplug_slot->name);
434 417
435 /* GSM, debug */ 418 /* GSM, debug */
436 hs_csr = cpci_get_hs_csr(slot); 419 hs_csr = cpci_get_hs_csr(slot);
437 dbg("%s - slot %s HS_CSR (1) = %04x", 420 dbg("%s - slot %s HS_CSR (1) = %04x",
438 __func__, slot->hotplug_slot->name, hs_csr); 421 __func__, slot_name(slot), hs_csr);
439 422
440 /* Configure device */ 423 /* Configure device */
441 dbg("%s - configuring slot %s", 424 dbg("%s - configuring slot %s",
442 __func__, slot->hotplug_slot->name); 425 __func__, slot_name(slot));
443 if (cpci_configure_slot(slot)) { 426 if (cpci_configure_slot(slot)) {
444 err("%s - could not configure slot %s", 427 err("%s - could not configure slot %s",
445 __func__, slot->hotplug_slot->name); 428 __func__, slot_name(slot));
446 continue; 429 continue;
447 } 430 }
448 dbg("%s - finished configuring slot %s", 431 dbg("%s - finished configuring slot %s",
449 __func__, slot->hotplug_slot->name); 432 __func__, slot_name(slot));
450 433
451 /* GSM, debug */ 434 /* GSM, debug */
452 hs_csr = cpci_get_hs_csr(slot); 435 hs_csr = cpci_get_hs_csr(slot);
453 dbg("%s - slot %s HS_CSR (2) = %04x", 436 dbg("%s - slot %s HS_CSR (2) = %04x",
454 __func__, slot->hotplug_slot->name, hs_csr); 437 __func__, slot_name(slot), hs_csr);
455 438
456 if (update_latch_status(slot->hotplug_slot, 1)) 439 if (update_latch_status(slot->hotplug_slot, 1))
457 warn("failure to update latch file"); 440 warn("failure to update latch file");
@@ -464,18 +447,18 @@ check_slots(void)
464 /* GSM, debug */ 447 /* GSM, debug */
465 hs_csr = cpci_get_hs_csr(slot); 448 hs_csr = cpci_get_hs_csr(slot);
466 dbg("%s - slot %s HS_CSR (3) = %04x", 449 dbg("%s - slot %s HS_CSR (3) = %04x",
467 __func__, slot->hotplug_slot->name, hs_csr); 450 __func__, slot_name(slot), hs_csr);
468 451
469 inserted++; 452 inserted++;
470 } else if (cpci_check_ext(slot)) { 453 } else if (cpci_check_ext(slot)) {
471 /* Process extraction request */ 454 /* Process extraction request */
472 dbg("%s - slot %s extracted", 455 dbg("%s - slot %s extracted",
473 __func__, slot->hotplug_slot->name); 456 __func__, slot_name(slot));
474 457
475 /* GSM, debug */ 458 /* GSM, debug */
476 hs_csr = cpci_get_hs_csr(slot); 459 hs_csr = cpci_get_hs_csr(slot);
477 dbg("%s - slot %s HS_CSR = %04x", 460 dbg("%s - slot %s HS_CSR = %04x",
478 __func__, slot->hotplug_slot->name, hs_csr); 461 __func__, slot_name(slot), hs_csr);
479 462
480 if (!slot->extracting) { 463 if (!slot->extracting) {
481 if (update_latch_status(slot->hotplug_slot, 0)) { 464 if (update_latch_status(slot->hotplug_slot, 0)) {
@@ -493,7 +476,7 @@ check_slots(void)
493 * bother trying to tell the driver or not? 476 * bother trying to tell the driver or not?
494 */ 477 */
495 err("card in slot %s was improperly removed", 478 err("card in slot %s was improperly removed",
496 slot->hotplug_slot->name); 479 slot_name(slot));
497 if (update_adapter_status(slot->hotplug_slot, 0)) 480 if (update_adapter_status(slot->hotplug_slot, 0))
498 warn("failure to update adapter file"); 481 warn("failure to update adapter file");
499 slot->extracting = 0; 482 slot->extracting = 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index df82b95e2874..829c327cfb5e 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -209,7 +209,7 @@ int cpci_led_on(struct slot* slot)
209 hs_cap + 2, 209 hs_cap + 2,
210 hs_csr)) { 210 hs_csr)) {
211 err("Could not set LOO for slot %s", 211 err("Could not set LOO for slot %s",
212 slot->hotplug_slot->name); 212 hotplug_slot_name(slot->hotplug_slot));
213 return -ENODEV; 213 return -ENODEV;
214 } 214 }
215 } 215 }
@@ -238,7 +238,7 @@ int cpci_led_off(struct slot* slot)
238 hs_cap + 2, 238 hs_cap + 2,
239 hs_csr)) { 239 hs_csr)) {
240 err("Could not clear LOO for slot %s", 240 err("Could not clear LOO for slot %s",
241 slot->hotplug_slot->name); 241 hotplug_slot_name(slot->hotplug_slot));
242 return -ENODEV; 242 return -ENODEV;
243 } 243 }
244 } 244 }
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index b1decfa88b7a..afaf8f69f73e 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -449,6 +449,11 @@ extern u8 cpqhp_disk_irq;
449 449
450/* inline functions */ 450/* inline functions */
451 451
452static inline char *slot_name(struct slot *slot)
453{
454 return hotplug_slot_name(slot->hotplug_slot);
455}
456
452/* 457/*
453 * return_resource 458 * return_resource
454 * 459 *
@@ -696,14 +701,6 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
696 return presence_save; 701 return presence_save;
697} 702}
698 703
699#define SLOT_NAME_SIZE 10
700
701static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot)
702{
703 snprintf(buffer, buffer_size, "%d", slot->number);
704}
705
706
707static inline int wait_for_ctrl_irq(struct controller *ctrl) 704static inline int wait_for_ctrl_irq(struct controller *ctrl)
708{ 705{
709 DECLARE_WAITQUEUE(wait, current); 706 DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 54defec51d08..8514c3a1746a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -315,14 +315,15 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
315{ 315{
316 struct slot *slot = hotplug_slot->private; 316 struct slot *slot = hotplug_slot->private;
317 317
318 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 318 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
319 319
320 kfree(slot->hotplug_slot->info); 320 kfree(slot->hotplug_slot->info);
321 kfree(slot->hotplug_slot->name);
322 kfree(slot->hotplug_slot); 321 kfree(slot->hotplug_slot);
323 kfree(slot); 322 kfree(slot);
324} 323}
325 324
325#define SLOT_NAME_SIZE 10
326
326static int ctrl_slot_setup(struct controller *ctrl, 327static int ctrl_slot_setup(struct controller *ctrl,
327 void __iomem *smbios_start, 328 void __iomem *smbios_start,
328 void __iomem *smbios_table) 329 void __iomem *smbios_table)
@@ -335,6 +336,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
335 u8 slot_number; 336 u8 slot_number;
336 u8 ctrl_slot; 337 u8 ctrl_slot;
337 u32 tempdword; 338 u32 tempdword;
339 char name[SLOT_NAME_SIZE];
338 void __iomem *slot_entry= NULL; 340 void __iomem *slot_entry= NULL;
339 int result = -ENOMEM; 341 int result = -ENOMEM;
340 342
@@ -363,16 +365,12 @@ static int ctrl_slot_setup(struct controller *ctrl,
363 if (!hotplug_slot->info) 365 if (!hotplug_slot->info)
364 goto error_hpslot; 366 goto error_hpslot;
365 hotplug_slot_info = hotplug_slot->info; 367 hotplug_slot_info = hotplug_slot->info;
366 hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
367
368 if (!hotplug_slot->name)
369 goto error_info;
370 368
371 slot->ctrl = ctrl; 369 slot->ctrl = ctrl;
372 slot->bus = ctrl->bus; 370 slot->bus = ctrl->bus;
373 slot->device = slot_device; 371 slot->device = slot_device;
374 slot->number = slot_number; 372 slot->number = slot_number;
375 dbg("slot->number = %d\n", slot->number); 373 dbg("slot->number = %u\n", slot->number);
376 374
377 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9, 375 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
378 slot_entry); 376 slot_entry);
@@ -418,9 +416,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
418 /* register this slot with the hotplug pci core */ 416 /* register this slot with the hotplug pci core */
419 hotplug_slot->release = &release_slot; 417 hotplug_slot->release = &release_slot;
420 hotplug_slot->private = slot; 418 hotplug_slot->private = slot;
421 make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot); 419 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
422 hotplug_slot->ops = &cpqphp_hotplug_slot_ops; 420 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
423 421
424 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot); 422 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
425 hotplug_slot_info->attention_status = 423 hotplug_slot_info->attention_status =
426 cpq_get_attention_status(ctrl, slot); 424 cpq_get_attention_status(ctrl, slot);
@@ -435,11 +433,12 @@ static int ctrl_slot_setup(struct controller *ctrl,
435 slot->number, ctrl->slot_device_offset, 433 slot->number, ctrl->slot_device_offset,
436 slot_number); 434 slot_number);
437 result = pci_hp_register(hotplug_slot, 435 result = pci_hp_register(hotplug_slot,
438 ctrl->pci_dev->subordinate, 436 ctrl->pci_dev->bus,
439 slot->device); 437 slot->device,
438 name);
440 if (result) { 439 if (result) {
441 err("pci_hp_register failed with error %d\n", result); 440 err("pci_hp_register failed with error %d\n", result);
442 goto error_name; 441 goto error_info;
443 } 442 }
444 443
445 slot->next = ctrl->slot; 444 slot->next = ctrl->slot;
@@ -451,8 +450,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
451 } 450 }
452 451
453 return 0; 452 return 0;
454error_name:
455 kfree(hotplug_slot->name);
456error_info: 453error_info:
457 kfree(hotplug_slot_info); 454 kfree(hotplug_slot_info);
458error_hpslot: 455error_hpslot:
@@ -638,7 +635,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
638 u8 device; 635 u8 device;
639 u8 function; 636 u8 function;
640 637
641 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 638 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
642 639
643 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 640 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
644 return -ENODEV; 641 return -ENODEV;
@@ -665,7 +662,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
665 u8 device; 662 u8 device;
666 u8 function; 663 u8 function;
667 664
668 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 665 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
669 666
670 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 667 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
671 return -ENODEV; 668 return -ENODEV;
@@ -697,7 +694,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
697 u8 device; 694 u8 device;
698 u8 function; 695 u8 function;
699 696
700 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 697 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
701 698
702 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 699 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
703 return -ENODEV; 700 return -ENODEV;
@@ -720,7 +717,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
720 struct slot *slot = hotplug_slot->private; 717 struct slot *slot = hotplug_slot->private;
721 struct controller *ctrl = slot->ctrl; 718 struct controller *ctrl = slot->ctrl;
722 719
723 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 720 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
724 721
725 return cpqhp_hardware_test(ctrl, value); 722 return cpqhp_hardware_test(ctrl, value);
726} 723}
@@ -731,7 +728,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
731 struct slot *slot = hotplug_slot->private; 728 struct slot *slot = hotplug_slot->private;
732 struct controller *ctrl = slot->ctrl; 729 struct controller *ctrl = slot->ctrl;
733 730
734 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 731 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
735 732
736 *value = get_slot_enabled(ctrl, slot); 733 *value = get_slot_enabled(ctrl, slot);
737 return 0; 734 return 0;
@@ -742,7 +739,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
742 struct slot *slot = hotplug_slot->private; 739 struct slot *slot = hotplug_slot->private;
743 struct controller *ctrl = slot->ctrl; 740 struct controller *ctrl = slot->ctrl;
744 741
745 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 742 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
746 743
747 *value = cpq_get_attention_status(ctrl, slot); 744 *value = cpq_get_attention_status(ctrl, slot);
748 return 0; 745 return 0;
@@ -753,7 +750,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
753 struct slot *slot = hotplug_slot->private; 750 struct slot *slot = hotplug_slot->private;
754 struct controller *ctrl = slot->ctrl; 751 struct controller *ctrl = slot->ctrl;
755 752
756 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 753 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
757 754
758 *value = cpq_get_latch_status(ctrl, slot); 755 *value = cpq_get_latch_status(ctrl, slot);
759 756
@@ -765,7 +762,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
765 struct slot *slot = hotplug_slot->private; 762 struct slot *slot = hotplug_slot->private;
766 struct controller *ctrl = slot->ctrl; 763 struct controller *ctrl = slot->ctrl;
767 764
768 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 765 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
769 766
770 *value = get_presence_status(ctrl, slot); 767 *value = get_presence_status(ctrl, slot);
771 768
@@ -777,7 +774,7 @@ static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
777 struct slot *slot = hotplug_slot->private; 774 struct slot *slot = hotplug_slot->private;
778 struct controller *ctrl = slot->ctrl; 775 struct controller *ctrl = slot->ctrl;
779 776
780 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 777 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
781 778
782 *value = ctrl->speed_capability; 779 *value = ctrl->speed_capability;
783 780
@@ -789,7 +786,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
789 struct slot *slot = hotplug_slot->private; 786 struct slot *slot = hotplug_slot->private;
790 struct controller *ctrl = slot->ctrl; 787 struct controller *ctrl = slot->ctrl;
791 788
792 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 789 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
793 790
794 *value = ctrl->speed; 791 *value = ctrl->speed;
795 792
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ef041ca91c27..a60a25290995 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1139,7 +1139,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1139 for(slot = ctrl->slot; slot; slot = slot->next) { 1139 for(slot = ctrl->slot; slot; slot = slot->next) {
1140 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1140 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1141 continue; 1141 continue;
1142 if (!slot->hotplug_slot && !slot->hotplug_slot->info) 1142 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
1143 continue; 1143 continue;
1144 if (slot->hotplug_slot->info->adapter_status == 0) 1144 if (slot->hotplug_slot->info->adapter_status == 0)
1145 continue; 1145 continue;
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 40337a06c18a..3a2637a00934 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,10 +66,10 @@ struct dummy_slot {
66 struct pci_dev *dev; 66 struct pci_dev *dev;
67 struct work_struct remove_work; 67 struct work_struct remove_work;
68 unsigned long removed; 68 unsigned long removed;
69 char name[8];
70}; 69};
71 70
72static int debug; 71static int debug;
72static int dup_slots;
73static LIST_HEAD(slot_list); 73static LIST_HEAD(slot_list);
74static struct workqueue_struct *dummyphp_wq; 74static struct workqueue_struct *dummyphp_wq;
75 75
@@ -96,10 +96,13 @@ static void dummy_release(struct hotplug_slot *slot)
96 kfree(dslot); 96 kfree(dslot);
97} 97}
98 98
99#define SLOT_NAME_SIZE 8
100
99static int add_slot(struct pci_dev *dev) 101static int add_slot(struct pci_dev *dev)
100{ 102{
101 struct dummy_slot *dslot; 103 struct dummy_slot *dslot;
102 struct hotplug_slot *slot; 104 struct hotplug_slot *slot;
105 char name[SLOT_NAME_SIZE];
103 int retval = -ENOMEM; 106 int retval = -ENOMEM;
104 static int count = 1; 107 static int count = 1;
105 108
@@ -119,19 +122,22 @@ static int add_slot(struct pci_dev *dev)
119 if (!dslot) 122 if (!dslot)
120 goto error_info; 123 goto error_info;
121 124
122 slot->name = dslot->name; 125 if (dup_slots)
123 snprintf(slot->name, sizeof(dslot->name), "fake%d", count++); 126 snprintf(name, SLOT_NAME_SIZE, "fake");
124 dbg("slot->name = %s\n", slot->name); 127 else
128 snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
129 dbg("slot->name = %s\n", name);
125 slot->ops = &dummy_hotplug_slot_ops; 130 slot->ops = &dummy_hotplug_slot_ops;
126 slot->release = &dummy_release; 131 slot->release = &dummy_release;
127 slot->private = dslot; 132 slot->private = dslot;
128 133
129 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn)); 134 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
130 if (retval) { 135 if (retval) {
131 err("pci_hp_register failed with error %d\n", retval); 136 err("pci_hp_register failed with error %d\n", retval);
132 goto error_dslot; 137 goto error_dslot;
133 } 138 }
134 139
140 dbg("slot->name = %s\n", hotplug_slot_name(slot));
135 dslot->slot = slot; 141 dslot->slot = slot;
136 dslot->dev = pci_dev_get(dev); 142 dslot->dev = pci_dev_get(dev);
137 list_add (&dslot->node, &slot_list); 143 list_add (&dslot->node, &slot_list);
@@ -167,10 +173,11 @@ static void remove_slot(struct dummy_slot *dslot)
167{ 173{
168 int retval; 174 int retval;
169 175
170 dbg("removing slot %s\n", dslot->slot->name); 176 dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
171 retval = pci_hp_deregister(dslot->slot); 177 retval = pci_hp_deregister(dslot->slot);
172 if (retval) 178 if (retval)
173 err("Problem unregistering a slot %s\n", dslot->slot->name); 179 err("Problem unregistering a slot %s\n",
180 hotplug_slot_name(dslot->slot));
174} 181}
175 182
176/* called from the single-threaded workqueue handler to remove a slot */ 183/* called from the single-threaded workqueue handler to remove a slot */
@@ -308,7 +315,7 @@ static int disable_slot(struct hotplug_slot *slot)
308 return -ENODEV; 315 return -ENODEV;
309 dslot = slot->private; 316 dslot = slot->private;
310 317
311 dbg("%s - physical_slot = %s\n", __func__, slot->name); 318 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
312 319
313 for (func = 7; func >= 0; func--) { 320 for (func = 7; func >= 0; func--) {
314 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); 321 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
@@ -320,15 +327,15 @@ static int disable_slot(struct hotplug_slot *slot)
320 return -ENODEV; 327 return -ENODEV;
321 } 328 }
322 329
330 /* remove the device from the pci core */
331 pci_remove_bus_device(dev);
332
323 /* queue work item to blow away this sysfs entry and other 333 /* queue work item to blow away this sysfs entry and other
324 * parts. 334 * parts.
325 */ 335 */
326 INIT_WORK(&dslot->remove_work, remove_slot_worker); 336 INIT_WORK(&dslot->remove_work, remove_slot_worker);
327 queue_work(dummyphp_wq, &dslot->remove_work); 337 queue_work(dummyphp_wq, &dslot->remove_work);
328 338
329 /* blow away this sysfs entry and other parts. */
330 remove_slot(dslot);
331
332 pci_dev_put(dev); 339 pci_dev_put(dev);
333 } 340 }
334 return 0; 341 return 0;
@@ -373,4 +380,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
373MODULE_LICENSE("GPL"); 380MODULE_LICENSE("GPL");
374module_param(debug, bool, S_IRUGO | S_IWUSR); 381module_param(debug, bool, S_IRUGO | S_IWUSR);
375MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 382MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
376 383module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
384MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 612d96301509..a8d391a4957d 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -707,17 +707,16 @@ struct slot {
707 u8 device; 707 u8 device;
708 u8 number; 708 u8 number;
709 u8 real_physical_slot_num; 709 u8 real_physical_slot_num;
710 char name[100];
711 u32 capabilities; 710 u32 capabilities;
712 u8 supported_speed; 711 u8 supported_speed;
713 u8 supported_bus_mode; 712 u8 supported_bus_mode;
713 u8 flag; /* this is for disable slot and polling */
714 u8 ctlr_index;
714 struct hotplug_slot *hotplug_slot; 715 struct hotplug_slot *hotplug_slot;
715 struct controller *ctrl; 716 struct controller *ctrl;
716 struct pci_func *func; 717 struct pci_func *func;
717 u8 irq[4]; 718 u8 irq[4];
718 u8 flag; /* this is for disable slot and polling */
719 int bit_mode; /* 0 = 32, 1 = 64 */ 719 int bit_mode; /* 0 = 32, 1 = 64 */
720 u8 ctlr_index;
721 struct bus_info *bus_on; 720 struct bus_info *bus_on;
722 struct list_head ibm_slot_list; 721 struct list_head ibm_slot_list;
723 u8 status; 722 u8 status;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 8467d0287325..c1abac8ab5c3 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,10 +123,8 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
123static void __init print_bus_info (void) 123static void __init print_bus_info (void)
124{ 124{
125 struct bus_info *ptr; 125 struct bus_info *ptr;
126 struct list_head *ptr1;
127 126
128 list_for_each (ptr1, &bus_info_head) { 127 list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
129 ptr = list_entry (ptr1, struct bus_info, bus_info_list);
130 debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); 128 debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
131 debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); 129 debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
132 debug ("%s - slot_count = %x\n", __func__, ptr->slot_count); 130 debug ("%s - slot_count = %x\n", __func__, ptr->slot_count);
@@ -146,10 +144,8 @@ static void __init print_bus_info (void)
146static void print_lo_info (void) 144static void print_lo_info (void)
147{ 145{
148 struct rio_detail *ptr; 146 struct rio_detail *ptr;
149 struct list_head *ptr1;
150 debug ("print_lo_info ----\n"); 147 debug ("print_lo_info ----\n");
151 list_for_each (ptr1, &rio_lo_head) { 148 list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
152 ptr = list_entry (ptr1, struct rio_detail, rio_detail_list);
153 debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); 149 debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
154 debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); 150 debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
155 debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); 151 debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
@@ -163,10 +159,8 @@ static void print_lo_info (void)
163static void print_vg_info (void) 159static void print_vg_info (void)
164{ 160{
165 struct rio_detail *ptr; 161 struct rio_detail *ptr;
166 struct list_head *ptr1;
167 debug ("%s ---\n", __func__); 162 debug ("%s ---\n", __func__);
168 list_for_each (ptr1, &rio_vg_head) { 163 list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) {
169 ptr = list_entry (ptr1, struct rio_detail, rio_detail_list);
170 debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); 164 debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
171 debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); 165 debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
172 debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); 166 debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
@@ -180,10 +174,8 @@ static void print_vg_info (void)
180static void __init print_ebda_pci_rsrc (void) 174static void __init print_ebda_pci_rsrc (void)
181{ 175{
182 struct ebda_pci_rsrc *ptr; 176 struct ebda_pci_rsrc *ptr;
183 struct list_head *ptr1;
184 177
185 list_for_each (ptr1, &ibmphp_ebda_pci_rsrc_head) { 178 list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
186 ptr = list_entry (ptr1, struct ebda_pci_rsrc, ebda_pci_rsrc_list);
187 debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", 179 debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
188 __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); 180 __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
189 } 181 }
@@ -192,10 +184,8 @@ static void __init print_ebda_pci_rsrc (void)
192static void __init print_ibm_slot (void) 184static void __init print_ibm_slot (void)
193{ 185{
194 struct slot *ptr; 186 struct slot *ptr;
195 struct list_head *ptr1;
196 187
197 list_for_each (ptr1, &ibmphp_slot_head) { 188 list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) {
198 ptr = list_entry (ptr1, struct slot, ibm_slot_list);
199 debug ("%s - slot_number: %x\n", __func__, ptr->number); 189 debug ("%s - slot_number: %x\n", __func__, ptr->number);
200 } 190 }
201} 191}
@@ -203,10 +193,8 @@ static void __init print_ibm_slot (void)
203static void __init print_opt_vg (void) 193static void __init print_opt_vg (void)
204{ 194{
205 struct opt_rio *ptr; 195 struct opt_rio *ptr;
206 struct list_head *ptr1;
207 debug ("%s ---\n", __func__); 196 debug ("%s ---\n", __func__);
208 list_for_each (ptr1, &opt_vg_head) { 197 list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
209 ptr = list_entry (ptr1, struct opt_rio, opt_rio_list);
210 debug ("%s - rio_type %x\n", __func__, ptr->rio_type); 198 debug ("%s - rio_type %x\n", __func__, ptr->rio_type);
211 debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num); 199 debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num);
212 debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num); 200 debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num);
@@ -217,13 +205,9 @@ static void __init print_opt_vg (void)
217static void __init print_ebda_hpc (void) 205static void __init print_ebda_hpc (void)
218{ 206{
219 struct controller *hpc_ptr; 207 struct controller *hpc_ptr;
220 struct list_head *ptr1;
221 u16 index; 208 u16 index;
222 209
223 list_for_each (ptr1, &ebda_hpc_head) { 210 list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) {
224
225 hpc_ptr = list_entry (ptr1, struct controller, ebda_hpc_list);
226
227 for (index = 0; index < hpc_ptr->slot_count; index++) { 211 for (index = 0; index < hpc_ptr->slot_count; index++) {
228 debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); 212 debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num);
229 debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); 213 debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num);
@@ -276,7 +260,7 @@ int __init ibmphp_access_ebda (void)
276 iounmap (io_mem); 260 iounmap (io_mem);
277 debug ("returned ebda segment: %x\n", ebda_seg); 261 debug ("returned ebda segment: %x\n", ebda_seg);
278 262
279 io_mem = ioremap (ebda_seg<<4, 65000); 263 io_mem = ioremap(ebda_seg<<4, 1024);
280 if (!io_mem ) 264 if (!io_mem )
281 return -ENOMEM; 265 return -ENOMEM;
282 next_offset = 0x180; 266 next_offset = 0x180;
@@ -460,9 +444,7 @@ static int __init ebda_rio_table (void)
460static struct opt_rio *search_opt_vg (u8 chassis_num) 444static struct opt_rio *search_opt_vg (u8 chassis_num)
461{ 445{
462 struct opt_rio *ptr; 446 struct opt_rio *ptr;
463 struct list_head *ptr1; 447 list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
464 list_for_each (ptr1, &opt_vg_head) {
465 ptr = list_entry (ptr1, struct opt_rio, opt_rio_list);
466 if (ptr->chassis_num == chassis_num) 448 if (ptr->chassis_num == chassis_num)
467 return ptr; 449 return ptr;
468 } 450 }
@@ -473,10 +455,8 @@ static int __init combine_wpg_for_chassis (void)
473{ 455{
474 struct opt_rio *opt_rio_ptr = NULL; 456 struct opt_rio *opt_rio_ptr = NULL;
475 struct rio_detail *rio_detail_ptr = NULL; 457 struct rio_detail *rio_detail_ptr = NULL;
476 struct list_head *list_head_ptr = NULL;
477 458
478 list_for_each (list_head_ptr, &rio_vg_head) { 459 list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
479 rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list);
480 opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); 460 opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
481 if (!opt_rio_ptr) { 461 if (!opt_rio_ptr) {
482 opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); 462 opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL);
@@ -497,14 +477,12 @@ static int __init combine_wpg_for_chassis (void)
497} 477}
498 478
499/* 479/*
500 * reorgnizing linked list of expansion box 480 * reorganizing linked list of expansion box
501 */ 481 */
502static struct opt_rio_lo *search_opt_lo (u8 chassis_num) 482static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
503{ 483{
504 struct opt_rio_lo *ptr; 484 struct opt_rio_lo *ptr;
505 struct list_head *ptr1; 485 list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
506 list_for_each (ptr1, &opt_lo_head) {
507 ptr = list_entry (ptr1, struct opt_rio_lo, opt_rio_lo_list);
508 if (ptr->chassis_num == chassis_num) 486 if (ptr->chassis_num == chassis_num)
509 return ptr; 487 return ptr;
510 } 488 }
@@ -515,10 +493,8 @@ static int combine_wpg_for_expansion (void)
515{ 493{
516 struct opt_rio_lo *opt_rio_lo_ptr = NULL; 494 struct opt_rio_lo *opt_rio_lo_ptr = NULL;
517 struct rio_detail *rio_detail_ptr = NULL; 495 struct rio_detail *rio_detail_ptr = NULL;
518 struct list_head *list_head_ptr = NULL;
519 496
520 list_for_each (list_head_ptr, &rio_lo_head) { 497 list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
521 rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list);
522 opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); 498 opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
523 if (!opt_rio_lo_ptr) { 499 if (!opt_rio_lo_ptr) {
524 opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); 500 opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL);
@@ -550,20 +526,17 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
550{ 526{
551 struct opt_rio *opt_vg_ptr = NULL; 527 struct opt_rio *opt_vg_ptr = NULL;
552 struct opt_rio_lo *opt_lo_ptr = NULL; 528 struct opt_rio_lo *opt_lo_ptr = NULL;
553 struct list_head *ptr = NULL;
554 int rc = 0; 529 int rc = 0;
555 530
556 if (!var) { 531 if (!var) {
557 list_for_each (ptr, &opt_vg_head) { 532 list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
558 opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list);
559 if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { 533 if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
560 rc = -ENODEV; 534 rc = -ENODEV;
561 break; 535 break;
562 } 536 }
563 } 537 }
564 } else { 538 } else {
565 list_for_each (ptr, &opt_lo_head) { 539 list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
566 opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list);
567 if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) { 540 if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) {
568 rc = -ENODEV; 541 rc = -ENODEV;
569 break; 542 break;
@@ -576,10 +549,8 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
576static struct opt_rio_lo * find_rxe_num (u8 slot_num) 549static struct opt_rio_lo * find_rxe_num (u8 slot_num)
577{ 550{
578 struct opt_rio_lo *opt_lo_ptr; 551 struct opt_rio_lo *opt_lo_ptr;
579 struct list_head *ptr;
580 552
581 list_for_each (ptr, &opt_lo_head) { 553 list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
582 opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list);
583 //check to see if this slot_num belongs to expansion box 554 //check to see if this slot_num belongs to expansion box
584 if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) 555 if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
585 return opt_lo_ptr; 556 return opt_lo_ptr;
@@ -590,10 +561,8 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num)
590static struct opt_rio * find_chassis_num (u8 slot_num) 561static struct opt_rio * find_chassis_num (u8 slot_num)
591{ 562{
592 struct opt_rio *opt_vg_ptr; 563 struct opt_rio *opt_vg_ptr;
593 struct list_head *ptr;
594 564
595 list_for_each (ptr, &opt_vg_head) { 565 list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
596 opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list);
597 //check to see if this slot_num belongs to chassis 566 //check to see if this slot_num belongs to chassis
598 if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) 567 if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
599 return opt_vg_ptr; 568 return opt_vg_ptr;
@@ -607,11 +576,9 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
607static u8 calculate_first_slot (u8 slot_num) 576static u8 calculate_first_slot (u8 slot_num)
608{ 577{
609 u8 first_slot = 1; 578 u8 first_slot = 1;
610 struct list_head * list;
611 struct slot * slot_cur; 579 struct slot * slot_cur;
612 580
613 list_for_each (list, &ibmphp_slot_head) { 581 list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
614 slot_cur = list_entry (list, struct slot, ibm_slot_list);
615 if (slot_cur->ctrl) { 582 if (slot_cur->ctrl) {
616 if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) 583 if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
617 first_slot = slot_cur->ctrl->ending_slot_num; 584 first_slot = slot_cur->ctrl->ending_slot_num;
@@ -620,11 +587,14 @@ static u8 calculate_first_slot (u8 slot_num)
620 return first_slot + 1; 587 return first_slot + 1;
621 588
622} 589}
590
591#define SLOT_NAME_SIZE 30
592
623static char *create_file_name (struct slot * slot_cur) 593static char *create_file_name (struct slot * slot_cur)
624{ 594{
625 struct opt_rio *opt_vg_ptr = NULL; 595 struct opt_rio *opt_vg_ptr = NULL;
626 struct opt_rio_lo *opt_lo_ptr = NULL; 596 struct opt_rio_lo *opt_lo_ptr = NULL;
627 static char str[30]; 597 static char str[SLOT_NAME_SIZE];
628 int which = 0; /* rxe = 1, chassis = 0 */ 598 int which = 0; /* rxe = 1, chassis = 0 */
629 u8 number = 1; /* either chassis or rxe # */ 599 u8 number = 1; /* either chassis or rxe # */
630 u8 first_slot = 1; 600 u8 first_slot = 1;
@@ -736,7 +706,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
736 706
737 slot = hotplug_slot->private; 707 slot = hotplug_slot->private;
738 kfree(slot->hotplug_slot->info); 708 kfree(slot->hotplug_slot->info);
739 kfree(slot->hotplug_slot->name);
740 kfree(slot->hotplug_slot); 709 kfree(slot->hotplug_slot);
741 slot->ctrl = NULL; 710 slot->ctrl = NULL;
742 slot->bus_on = NULL; 711 slot->bus_on = NULL;
@@ -767,7 +736,7 @@ static int __init ebda_rsrc_controller (void)
767 struct bus_info *bus_info_ptr1, *bus_info_ptr2; 736 struct bus_info *bus_info_ptr1, *bus_info_ptr2;
768 int rc; 737 int rc;
769 struct slot *tmp_slot; 738 struct slot *tmp_slot;
770 struct list_head *list; 739 char name[SLOT_NAME_SIZE];
771 740
772 addr = hpc_list_ptr->phys_addr; 741 addr = hpc_list_ptr->phys_addr;
773 for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { 742 for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
@@ -931,12 +900,6 @@ static int __init ebda_rsrc_controller (void)
931 goto error_no_hp_info; 900 goto error_no_hp_info;
932 } 901 }
933 902
934 hp_slot_ptr->name = kmalloc(30, GFP_KERNEL);
935 if (!hp_slot_ptr->name) {
936 rc = -ENOMEM;
937 goto error_no_hp_name;
938 }
939
940 tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL); 903 tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
941 if (!tmp_slot) { 904 if (!tmp_slot) {
942 rc = -ENOMEM; 905 rc = -ENOMEM;
@@ -997,12 +960,10 @@ static int __init ebda_rsrc_controller (void)
997 960
998 } /* each hpc */ 961 } /* each hpc */
999 962
1000 list_for_each (list, &ibmphp_slot_head) { 963 list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
1001 tmp_slot = list_entry (list, struct slot, ibm_slot_list); 964 snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
1002
1003 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
1004 pci_hp_register(tmp_slot->hotplug_slot, 965 pci_hp_register(tmp_slot->hotplug_slot,
1005 pci_find_bus(0, tmp_slot->bus), tmp_slot->device); 966 pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
1006 } 967 }
1007 968
1008 print_ebda_hpc (); 969 print_ebda_hpc ();
@@ -1012,8 +973,6 @@ static int __init ebda_rsrc_controller (void)
1012error: 973error:
1013 kfree (hp_slot_ptr->private); 974 kfree (hp_slot_ptr->private);
1014error_no_slot: 975error_no_slot:
1015 kfree (hp_slot_ptr->name);
1016error_no_hp_name:
1017 kfree (hp_slot_ptr->info); 976 kfree (hp_slot_ptr->info);
1018error_no_hp_info: 977error_no_hp_info:
1019 kfree (hp_slot_ptr); 978 kfree (hp_slot_ptr);
@@ -1101,10 +1060,8 @@ u16 ibmphp_get_total_controllers (void)
1101struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) 1060struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
1102{ 1061{
1103 struct slot *slot; 1062 struct slot *slot;
1104 struct list_head *list;
1105 1063
1106 list_for_each (list, &ibmphp_slot_head) { 1064 list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) {
1107 slot = list_entry (list, struct slot, ibm_slot_list);
1108 if (slot->number == physical_num) 1065 if (slot->number == physical_num)
1109 return slot; 1066 return slot;
1110 } 1067 }
@@ -1120,10 +1077,8 @@ struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
1120struct bus_info *ibmphp_find_same_bus_num (u32 num) 1077struct bus_info *ibmphp_find_same_bus_num (u32 num)
1121{ 1078{
1122 struct bus_info *ptr; 1079 struct bus_info *ptr;
1123 struct list_head *ptr1;
1124 1080
1125 list_for_each (ptr1, &bus_info_head) { 1081 list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
1126 ptr = list_entry (ptr1, struct bus_info, bus_info_list);
1127 if (ptr->busno == num) 1082 if (ptr->busno == num)
1128 return ptr; 1083 return ptr;
1129 } 1084 }
@@ -1136,10 +1091,8 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num)
1136int ibmphp_get_bus_index (u8 num) 1091int ibmphp_get_bus_index (u8 num)
1137{ 1092{
1138 struct bus_info *ptr; 1093 struct bus_info *ptr;
1139 struct list_head *ptr1;
1140 1094
1141 list_for_each (ptr1, &bus_info_head) { 1095 list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
1142 ptr = list_entry (ptr1, struct bus_info, bus_info_list);
1143 if (ptr->busno == num) 1096 if (ptr->busno == num)
1144 return ptr->index; 1097 return ptr->index;
1145 } 1098 }
@@ -1212,11 +1165,9 @@ static struct pci_driver ibmphp_driver = {
1212int ibmphp_register_pci (void) 1165int ibmphp_register_pci (void)
1213{ 1166{
1214 struct controller *ctrl; 1167 struct controller *ctrl;
1215 struct list_head *tmp;
1216 int rc = 0; 1168 int rc = 0;
1217 1169
1218 list_for_each (tmp, &ebda_hpc_head) { 1170 list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
1219 ctrl = list_entry (tmp, struct controller, ebda_hpc_list);
1220 if (ctrl->ctlr_type == 1) { 1171 if (ctrl->ctlr_type == 1) {
1221 rc = pci_register_driver(&ibmphp_driver); 1172 rc = pci_register_driver(&ibmphp_driver);
1222 break; 1173 break;
@@ -1227,12 +1178,10 @@ int ibmphp_register_pci (void)
1227static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) 1178static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
1228{ 1179{
1229 struct controller *ctrl; 1180 struct controller *ctrl;
1230 struct list_head *tmp;
1231 1181
1232 debug ("inside ibmphp_probe\n"); 1182 debug ("inside ibmphp_probe\n");
1233 1183
1234 list_for_each (tmp, &ebda_hpc_head) { 1184 list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
1235 ctrl = list_entry (tmp, struct controller, ebda_hpc_list);
1236 if (ctrl->ctlr_type == 1) { 1185 if (ctrl->ctlr_type == 1) {
1237 if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { 1186 if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
1238 ctrl->ctrl_dev = dev; 1187 ctrl->ctrl_dev = dev;
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5f85b1b120e3..535fce0f07f9 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/mount.h> 38#include <linux/mount.h>
39#include <linux/namei.h> 39#include <linux/namei.h>
40#include <linux/mutex.h>
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <linux/pci_hotplug.h> 42#include <linux/pci_hotplug.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -61,7 +62,7 @@ static int debug;
61////////////////////////////////////////////////////////////////// 62//////////////////////////////////////////////////////////////////
62 63
63static LIST_HEAD(pci_hotplug_slot_list); 64static LIST_HEAD(pci_hotplug_slot_list);
64static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock); 65static DEFINE_MUTEX(pci_hp_mutex);
65 66
66/* these strings match up with the values in pci_bus_speed */ 67/* these strings match up with the values in pci_bus_speed */
67static char *pci_bus_speed_strings[] = { 68static char *pci_bus_speed_strings[] = {
@@ -102,13 +103,13 @@ static int get_##name (struct hotplug_slot *slot, type *value) \
102{ \ 103{ \
103 struct hotplug_slot_ops *ops = slot->ops; \ 104 struct hotplug_slot_ops *ops = slot->ops; \
104 int retval = 0; \ 105 int retval = 0; \
105 if (try_module_get(ops->owner)) { \ 106 if (!try_module_get(ops->owner)) \
106 if (ops->get_##name) \ 107 return -ENODEV; \
107 retval = ops->get_##name(slot, value); \ 108 if (ops->get_##name) \
108 else \ 109 retval = ops->get_##name(slot, value); \
109 *value = slot->info->name; \ 110 else \
110 module_put(ops->owner); \ 111 *value = slot->info->name; \
111 } \ 112 module_put(ops->owner); \
112 return retval; \ 113 return retval; \
113} 114}
114 115
@@ -530,16 +531,12 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
530 struct hotplug_slot *slot; 531 struct hotplug_slot *slot;
531 struct list_head *tmp; 532 struct list_head *tmp;
532 533
533 spin_lock(&pci_hotplug_slot_list_lock);
534 list_for_each (tmp, &pci_hotplug_slot_list) { 534 list_for_each (tmp, &pci_hotplug_slot_list) {
535 slot = list_entry (tmp, struct hotplug_slot, slot_list); 535 slot = list_entry (tmp, struct hotplug_slot, slot_list);
536 if (strcmp(slot->name, name) == 0) 536 if (strcmp(hotplug_slot_name(slot), name) == 0)
537 goto out; 537 return slot;
538 } 538 }
539 slot = NULL; 539 return NULL;
540out:
541 spin_unlock(&pci_hotplug_slot_list_lock);
542 return slot;
543} 540}
544 541
545/** 542/**
@@ -547,13 +544,15 @@ out:
547 * @bus: bus this slot is on 544 * @bus: bus this slot is on
548 * @slot: pointer to the &struct hotplug_slot to register 545 * @slot: pointer to the &struct hotplug_slot to register
549 * @slot_nr: slot number 546 * @slot_nr: slot number
547 * @name: name registered with kobject core
550 * 548 *
551 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 549 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
552 * userspace interaction to the slot. 550 * userspace interaction to the slot.
553 * 551 *
554 * Returns 0 if successful, anything else for an error. 552 * Returns 0 if successful, anything else for an error.
555 */ 553 */
556int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) 554int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
555 const char *name)
557{ 556{
558 int result; 557 int result;
559 struct pci_slot *pci_slot; 558 struct pci_slot *pci_slot;
@@ -568,48 +567,29 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
568 return -EINVAL; 567 return -EINVAL;
569 } 568 }
570 569
571 /* Check if we have already registered a slot with the same name. */ 570 mutex_lock(&pci_hp_mutex);
572 if (get_slot_from_name(slot->name))
573 return -EEXIST;
574 571
575 /* 572 /*
576 * No problems if we call this interface from both ACPI_PCI_SLOT 573 * No problems if we call this interface from both ACPI_PCI_SLOT
577 * driver and call it here again. If we've already created the 574 * driver and call it here again. If we've already created the
578 * pci_slot, the interface will simply bump the refcount. 575 * pci_slot, the interface will simply bump the refcount.
579 */ 576 */
580 pci_slot = pci_create_slot(bus, slot_nr, slot->name); 577 pci_slot = pci_create_slot(bus, slot_nr, name, slot);
581 if (IS_ERR(pci_slot)) 578 if (IS_ERR(pci_slot)) {
582 return PTR_ERR(pci_slot); 579 result = PTR_ERR(pci_slot);
583 580 goto out;
584 if (pci_slot->hotplug) {
585 dbg("%s: already claimed\n", __func__);
586 pci_destroy_slot(pci_slot);
587 return -EBUSY;
588 } 581 }
589 582
590 slot->pci_slot = pci_slot; 583 slot->pci_slot = pci_slot;
591 pci_slot->hotplug = slot; 584 pci_slot->hotplug = slot;
592 585
593 /*
594 * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
595 */
596 if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
597 result = kobject_rename(&pci_slot->kobj, slot->name);
598 if (result) {
599 pci_destroy_slot(pci_slot);
600 return result;
601 }
602 }
603
604 spin_lock(&pci_hotplug_slot_list_lock);
605 list_add(&slot->slot_list, &pci_hotplug_slot_list); 586 list_add(&slot->slot_list, &pci_hotplug_slot_list);
606 spin_unlock(&pci_hotplug_slot_list_lock);
607 587
608 result = fs_add_slot(pci_slot); 588 result = fs_add_slot(pci_slot);
609 kobject_uevent(&pci_slot->kobj, KOBJ_ADD); 589 kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
610 dbg("Added slot %s to the list\n", slot->name); 590 dbg("Added slot %s to the list\n", name);
611 591out:
612 592 mutex_unlock(&pci_hp_mutex);
613 return result; 593 return result;
614} 594}
615 595
@@ -630,21 +610,23 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
630 if (!hotplug) 610 if (!hotplug)
631 return -ENODEV; 611 return -ENODEV;
632 612
633 temp = get_slot_from_name(hotplug->name); 613 mutex_lock(&pci_hp_mutex);
634 if (temp != hotplug) 614 temp = get_slot_from_name(hotplug_slot_name(hotplug));
615 if (temp != hotplug) {
616 mutex_unlock(&pci_hp_mutex);
635 return -ENODEV; 617 return -ENODEV;
618 }
636 619
637 spin_lock(&pci_hotplug_slot_list_lock);
638 list_del(&hotplug->slot_list); 620 list_del(&hotplug->slot_list);
639 spin_unlock(&pci_hotplug_slot_list_lock);
640 621
641 slot = hotplug->pci_slot; 622 slot = hotplug->pci_slot;
642 fs_remove_slot(slot); 623 fs_remove_slot(slot);
643 dbg("Removed slot %s from the list\n", hotplug->name); 624 dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
644 625
645 hotplug->release(hotplug); 626 hotplug->release(hotplug);
646 slot->hotplug = NULL; 627 slot->hotplug = NULL;
647 pci_destroy_slot(slot); 628 pci_destroy_slot(slot);
629 mutex_unlock(&pci_hp_mutex);
648 630
649 return 0; 631 return 0;
650} 632}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e3a1e7e7dba2..b2801a7ee37f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -43,7 +43,6 @@ extern int pciehp_poll_mode;
43extern int pciehp_poll_time; 43extern int pciehp_poll_time;
44extern int pciehp_debug; 44extern int pciehp_debug;
45extern int pciehp_force; 45extern int pciehp_force;
46extern int pciehp_slot_with_bus;
47extern struct workqueue_struct *pciehp_wq; 46extern struct workqueue_struct *pciehp_wq;
48 47
49#define dbg(format, arg...) \ 48#define dbg(format, arg...) \
@@ -58,19 +57,30 @@ extern struct workqueue_struct *pciehp_wq;
58#define warn(format, arg...) \ 57#define warn(format, arg...) \
59 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 58 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
60 59
60#define ctrl_dbg(ctrl, format, arg...) \
61 do { \
62 if (pciehp_debug) \
63 dev_printk(, &ctrl->pcie->device, \
64 format, ## arg); \
65 } while (0)
66#define ctrl_err(ctrl, format, arg...) \
67 dev_err(&ctrl->pcie->device, format, ## arg)
68#define ctrl_info(ctrl, format, arg...) \
69 dev_info(&ctrl->pcie->device, format, ## arg)
70#define ctrl_warn(ctrl, format, arg...) \
71 dev_warn(&ctrl->pcie->device, format, ## arg)
72
61#define SLOT_NAME_SIZE 10 73#define SLOT_NAME_SIZE 10
62struct slot { 74struct slot {
63 u8 bus; 75 u8 bus;
64 u8 device; 76 u8 device;
65 u32 number;
66 u8 state; 77 u8 state;
67 struct timer_list task_event;
68 u8 hp_slot; 78 u8 hp_slot;
79 u32 number;
69 struct controller *ctrl; 80 struct controller *ctrl;
70 struct hpc_ops *hpc_ops; 81 struct hpc_ops *hpc_ops;
71 struct hotplug_slot *hotplug_slot; 82 struct hotplug_slot *hotplug_slot;
72 struct list_head slot_list; 83 struct list_head slot_list;
73 char name[SLOT_NAME_SIZE];
74 unsigned long last_emi_toggle; 84 unsigned long last_emi_toggle;
75 struct delayed_work work; /* work for button event */ 85 struct delayed_work work; /* work for button event */
76 struct mutex lock; 86 struct mutex lock;
@@ -88,6 +98,7 @@ struct controller {
88 int num_slots; /* Number of slots on ctlr */ 98 int num_slots; /* Number of slots on ctlr */
89 int slot_num_inc; /* 1 or -1 */ 99 int slot_num_inc; /* 1 or -1 */
90 struct pci_dev *pci_dev; 100 struct pci_dev *pci_dev;
101 struct pcie_device *pcie; /* PCI Express port service */
91 struct list_head slot_list; 102 struct list_head slot_list;
92 struct hpc_ops *hpc_ops; 103 struct hpc_ops *hpc_ops;
93 wait_queue_head_t queue; /* sleep & wake process */ 104 wait_queue_head_t queue; /* sleep & wake process */
@@ -99,6 +110,7 @@ struct controller {
99 struct timer_list poll_timer; 110 struct timer_list poll_timer;
100 int cmd_busy; 111 int cmd_busy;
101 unsigned int no_cmd_complete:1; 112 unsigned int no_cmd_complete:1;
113 unsigned int link_active_reporting:1;
102}; 114};
103 115
104#define INT_BUTTON_IGNORE 0 116#define INT_BUTTON_IGNORE 0
@@ -162,6 +174,11 @@ int pciehp_enable_slot(struct slot *p_slot);
162int pciehp_disable_slot(struct slot *p_slot); 174int pciehp_disable_slot(struct slot *p_slot);
163int pcie_enable_notification(struct controller *ctrl); 175int pcie_enable_notification(struct controller *ctrl);
164 176
177static inline const char *slot_name(struct slot *slot)
178{
179 return hotplug_slot_name(slot->hotplug_slot);
180}
181
165static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 182static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
166{ 183{
167 struct slot *slot; 184 struct slot *slot;
@@ -171,7 +188,7 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
171 return slot; 188 return slot;
172 } 189 }
173 190
174 err("%s: slot (device=0x%x) not found\n", __func__, device); 191 ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
175 return NULL; 192 return NULL;
176} 193}
177 194
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3677495c4f91..4b23bc39b11e 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -41,7 +41,6 @@ int pciehp_debug;
41int pciehp_poll_mode; 41int pciehp_poll_mode;
42int pciehp_poll_time; 42int pciehp_poll_time;
43int pciehp_force; 43int pciehp_force;
44int pciehp_slot_with_bus;
45struct workqueue_struct *pciehp_wq; 44struct workqueue_struct *pciehp_wq;
46 45
47#define DRIVER_VERSION "0.4" 46#define DRIVER_VERSION "0.4"
@@ -56,12 +55,10 @@ module_param(pciehp_debug, bool, 0644);
56module_param(pciehp_poll_mode, bool, 0644); 55module_param(pciehp_poll_mode, bool, 0644);
57module_param(pciehp_poll_time, int, 0644); 56module_param(pciehp_poll_time, int, 0644);
58module_param(pciehp_force, bool, 0644); 57module_param(pciehp_force, bool, 0644);
59module_param(pciehp_slot_with_bus, bool, 0644);
60MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); 58MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
61MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); 59MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
62MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); 60MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
63MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); 61MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
64MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name");
65 62
66#define PCIE_MODULE_NAME "pciehp" 63#define PCIE_MODULE_NAME "pciehp"
67 64
@@ -147,9 +144,10 @@ set_lock_exit:
147 * sysfs interface which allows the user to toggle the Electro Mechanical 144 * sysfs interface which allows the user to toggle the Electro Mechanical
148 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock 145 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
149 */ 146 */
150static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, 147static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
151 size_t count) 148 const char *buf, size_t count)
152{ 149{
150 struct slot *slot = hotplug_slot->private;
153 unsigned long llock; 151 unsigned long llock;
154 u8 lock; 152 u8 lock;
155 int retval = 0; 153 int retval = 0;
@@ -160,10 +158,11 @@ static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf,
160 switch (lock) { 158 switch (lock) {
161 case 0: 159 case 0:
162 case 1: 160 case 1:
163 retval = set_lock_status(slot, lock); 161 retval = set_lock_status(hotplug_slot, lock);
164 break; 162 break;
165 default: 163 default:
166 err ("%d is an invalid lock value\n", lock); 164 ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
165 lock);
167 retval = -EINVAL; 166 retval = -EINVAL;
168 } 167 }
169 if (retval) 168 if (retval)
@@ -183,7 +182,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
183 */ 182 */
184static void release_slot(struct hotplug_slot *hotplug_slot) 183static void release_slot(struct hotplug_slot *hotplug_slot)
185{ 184{
186 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 185 struct slot *slot = hotplug_slot->private;
186
187 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
188 __func__, hotplug_slot_name(hotplug_slot));
187 189
188 kfree(hotplug_slot->info); 190 kfree(hotplug_slot->info);
189 kfree(hotplug_slot); 191 kfree(hotplug_slot);
@@ -194,6 +196,7 @@ static int init_slots(struct controller *ctrl)
194 struct slot *slot; 196 struct slot *slot;
195 struct hotplug_slot *hotplug_slot; 197 struct hotplug_slot *hotplug_slot;
196 struct hotplug_slot_info *info; 198 struct hotplug_slot_info *info;
199 char name[SLOT_NAME_SIZE];
197 int retval = -ENOMEM; 200 int retval = -ENOMEM;
198 201
199 list_for_each_entry(slot, &ctrl->slot_list, slot_list) { 202 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -207,37 +210,38 @@ static int init_slots(struct controller *ctrl)
207 210
208 /* register this slot with the hotplug pci core */ 211 /* register this slot with the hotplug pci core */
209 hotplug_slot->info = info; 212 hotplug_slot->info = info;
210 hotplug_slot->name = slot->name;
211 hotplug_slot->private = slot; 213 hotplug_slot->private = slot;
212 hotplug_slot->release = &release_slot; 214 hotplug_slot->release = &release_slot;
213 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 215 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
214 get_power_status(hotplug_slot, &info->power_status);
215 get_attention_status(hotplug_slot, &info->attention_status);
216 get_latch_status(hotplug_slot, &info->latch_status);
217 get_adapter_status(hotplug_slot, &info->adapter_status);
218 slot->hotplug_slot = hotplug_slot; 216 slot->hotplug_slot = hotplug_slot;
217 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
219 218
220 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 219 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
221 "slot_device_offset=%x\n", slot->bus, slot->device, 220 "hp_slot=%x sun=%x slot_device_offset=%x\n",
222 slot->hp_slot, slot->number, ctrl->slot_device_offset); 221 pci_domain_nr(ctrl->pci_dev->subordinate),
222 slot->bus, slot->device, slot->hp_slot, slot->number,
223 ctrl->slot_device_offset);
223 retval = pci_hp_register(hotplug_slot, 224 retval = pci_hp_register(hotplug_slot,
224 ctrl->pci_dev->subordinate, 225 ctrl->pci_dev->subordinate,
225 slot->device); 226 slot->device,
227 name);
226 if (retval) { 228 if (retval) {
227 err("pci_hp_register failed with error %d\n", retval); 229 ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
228 if (retval == -EEXIST) 230 retval);
229 err("Failed to register slot because of name "
230 "collision. Try \'pciehp_slot_with_bus\' "
231 "module option.\n");
232 goto error_info; 231 goto error_info;
233 } 232 }
233 get_power_status(hotplug_slot, &info->power_status);
234 get_attention_status(hotplug_slot, &info->attention_status);
235 get_latch_status(hotplug_slot, &info->latch_status);
236 get_adapter_status(hotplug_slot, &info->adapter_status);
234 /* create additional sysfs entries */ 237 /* create additional sysfs entries */
235 if (EMI(ctrl)) { 238 if (EMI(ctrl)) {
236 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj, 239 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
237 &hotplug_slot_attr_lock.attr); 240 &hotplug_slot_attr_lock.attr);
238 if (retval) { 241 if (retval) {
239 pci_hp_deregister(hotplug_slot); 242 pci_hp_deregister(hotplug_slot);
240 err("cannot create additional sysfs entries\n"); 243 ctrl_err(ctrl, "Cannot create additional sysfs "
244 "entries\n");
241 goto error_info; 245 goto error_info;
242 } 246 }
243 } 247 }
@@ -271,7 +275,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
271{ 275{
272 struct slot *slot = hotplug_slot->private; 276 struct slot *slot = hotplug_slot->private;
273 277
274 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 278 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
279 __func__, slot_name(slot));
275 280
276 hotplug_slot->info->attention_status = status; 281 hotplug_slot->info->attention_status = status;
277 282
@@ -286,7 +291,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
286{ 291{
287 struct slot *slot = hotplug_slot->private; 292 struct slot *slot = hotplug_slot->private;
288 293
289 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 294 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
295 __func__, slot_name(slot));
290 296
291 return pciehp_sysfs_enable_slot(slot); 297 return pciehp_sysfs_enable_slot(slot);
292} 298}
@@ -296,7 +302,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
296{ 302{
297 struct slot *slot = hotplug_slot->private; 303 struct slot *slot = hotplug_slot->private;
298 304
299 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 305 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
306 __func__, slot_name(slot));
300 307
301 return pciehp_sysfs_disable_slot(slot); 308 return pciehp_sysfs_disable_slot(slot);
302} 309}
@@ -306,7 +313,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
306 struct slot *slot = hotplug_slot->private; 313 struct slot *slot = hotplug_slot->private;
307 int retval; 314 int retval;
308 315
309 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 316 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
317 __func__, slot_name(slot));
310 318
311 retval = slot->hpc_ops->get_power_status(slot, value); 319 retval = slot->hpc_ops->get_power_status(slot, value);
312 if (retval < 0) 320 if (retval < 0)
@@ -320,7 +328,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
320 struct slot *slot = hotplug_slot->private; 328 struct slot *slot = hotplug_slot->private;
321 int retval; 329 int retval;
322 330
323 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 331 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
332 __func__, slot_name(slot));
324 333
325 retval = slot->hpc_ops->get_attention_status(slot, value); 334 retval = slot->hpc_ops->get_attention_status(slot, value);
326 if (retval < 0) 335 if (retval < 0)
@@ -334,7 +343,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
334 struct slot *slot = hotplug_slot->private; 343 struct slot *slot = hotplug_slot->private;
335 int retval; 344 int retval;
336 345
337 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 346 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
347 __func__, slot_name(slot));
338 348
339 retval = slot->hpc_ops->get_latch_status(slot, value); 349 retval = slot->hpc_ops->get_latch_status(slot, value);
340 if (retval < 0) 350 if (retval < 0)
@@ -348,7 +358,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
348 struct slot *slot = hotplug_slot->private; 358 struct slot *slot = hotplug_slot->private;
349 int retval; 359 int retval;
350 360
351 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 361 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
362 __func__, slot_name(slot));
352 363
353 retval = slot->hpc_ops->get_adapter_status(slot, value); 364 retval = slot->hpc_ops->get_adapter_status(slot, value);
354 if (retval < 0) 365 if (retval < 0)
@@ -363,7 +374,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
363 struct slot *slot = hotplug_slot->private; 374 struct slot *slot = hotplug_slot->private;
364 int retval; 375 int retval;
365 376
366 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 377 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
378 __func__, slot_name(slot));
367 379
368 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 380 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
369 if (retval < 0) 381 if (retval < 0)
@@ -377,7 +389,8 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
377 struct slot *slot = hotplug_slot->private; 389 struct slot *slot = hotplug_slot->private;
378 int retval; 390 int retval;
379 391
380 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 392 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
393 __func__, slot_name(slot));
381 394
382 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 395 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
383 if (retval < 0) 396 if (retval < 0)
@@ -395,14 +408,15 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
395 struct pci_dev *pdev = dev->port; 408 struct pci_dev *pdev = dev->port;
396 409
397 if (pciehp_force) 410 if (pciehp_force)
398 dbg("Bypassing BIOS check for pciehp use on %s\n", 411 dev_info(&dev->device,
399 pci_name(pdev)); 412 "Bypassing BIOS check for pciehp use on %s\n",
413 pci_name(pdev));
400 else if (pciehp_get_hp_hw_control_from_firmware(pdev)) 414 else if (pciehp_get_hp_hw_control_from_firmware(pdev))
401 goto err_out_none; 415 goto err_out_none;
402 416
403 ctrl = pcie_init(dev); 417 ctrl = pcie_init(dev);
404 if (!ctrl) { 418 if (!ctrl) {
405 dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); 419 dev_err(&dev->device, "Controller initialization failed\n");
406 goto err_out_none; 420 goto err_out_none;
407 } 421 }
408 set_service_data(dev, ctrl); 422 set_service_data(dev, ctrl);
@@ -411,11 +425,10 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
411 rc = init_slots(ctrl); 425 rc = init_slots(ctrl);
412 if (rc) { 426 if (rc) {
413 if (rc == -EBUSY) 427 if (rc == -EBUSY)
414 warn("%s: slot already registered by another " 428 ctrl_warn(ctrl, "Slot already registered by another "
415 "hotplug driver\n", PCIE_MODULE_NAME); 429 "hotplug driver\n");
416 else 430 else
417 err("%s: slot initialization failed\n", 431 ctrl_err(ctrl, "Slot initialization failed\n");
418 PCIE_MODULE_NAME);
419 goto err_out_release_ctlr; 432 goto err_out_release_ctlr;
420 } 433 }
421 434
@@ -454,13 +467,13 @@ static void pciehp_remove (struct pcie_device *dev)
454#ifdef CONFIG_PM 467#ifdef CONFIG_PM
455static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) 468static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
456{ 469{
457 printk("%s ENTRY\n", __func__); 470 dev_info(&dev->device, "%s ENTRY\n", __func__);
458 return 0; 471 return 0;
459} 472}
460 473
461static int pciehp_resume (struct pcie_device *dev) 474static int pciehp_resume (struct pcie_device *dev)
462{ 475{
463 printk("%s ENTRY\n", __func__); 476 dev_info(&dev->device, "%s ENTRY\n", __func__);
464 if (pciehp_force) { 477 if (pciehp_force) {
465 struct controller *ctrl = get_service_data(dev); 478 struct controller *ctrl = get_service_data(dev);
466 struct slot *t_slot; 479 struct slot *t_slot;
@@ -490,10 +503,9 @@ static struct pcie_port_service_id port_pci_ids[] = { {
490 .driver_data = 0, 503 .driver_data = 0,
491 }, { /* end: all zeroes */ } 504 }, { /* end: all zeroes */ }
492}; 505};
493static const char device_name[] = "hpdriver";
494 506
495static struct pcie_port_service_driver hpdriver_portdrv = { 507static struct pcie_port_service_driver hpdriver_portdrv = {
496 .name = (char *)device_name, 508 .name = PCIE_MODULE_NAME,
497 .id_table = &port_pci_ids[0], 509 .id_table = &port_pci_ids[0],
498 510
499 .probe = pciehp_probe, 511 .probe = pciehp_probe,
@@ -513,7 +525,7 @@ static int __init pcied_init(void)
513 dbg("pcie_port_service_register = %d\n", retval); 525 dbg("pcie_port_service_register = %d\n", retval);
514 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 526 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
515 if (retval) 527 if (retval)
516 dbg("%s: Failure to register service\n", __func__); 528 dbg("Failure to register service\n");
517 return retval; 529 return retval;
518} 530}
519 531
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 96a5d55a4983..fead63c6b49e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -58,14 +58,15 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
58u8 pciehp_handle_attention_button(struct slot *p_slot) 58u8 pciehp_handle_attention_button(struct slot *p_slot)
59{ 59{
60 u32 event_type; 60 u32 event_type;
61 struct controller *ctrl = p_slot->ctrl;
61 62
62 /* Attention Button Change */ 63 /* Attention Button Change */
63 dbg("pciehp: Attention button interrupt received.\n"); 64 ctrl_dbg(ctrl, "Attention button interrupt received\n");
64 65
65 /* 66 /*
66 * Button pressed - See if need to TAKE ACTION!!! 67 * Button pressed - See if need to TAKE ACTION!!!
67 */ 68 */
68 info("Button pressed on Slot(%s)\n", p_slot->name); 69 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
69 event_type = INT_BUTTON_PRESS; 70 event_type = INT_BUTTON_PRESS;
70 71
71 queue_interrupt_event(p_slot, event_type); 72 queue_interrupt_event(p_slot, event_type);
@@ -77,22 +78,23 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
77{ 78{
78 u8 getstatus; 79 u8 getstatus;
79 u32 event_type; 80 u32 event_type;
81 struct controller *ctrl = p_slot->ctrl;
80 82
81 /* Switch Change */ 83 /* Switch Change */
82 dbg("pciehp: Switch interrupt received.\n"); 84 ctrl_dbg(ctrl, "Switch interrupt received\n");
83 85
84 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 86 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
85 if (getstatus) { 87 if (getstatus) {
86 /* 88 /*
87 * Switch opened 89 * Switch opened
88 */ 90 */
89 info("Latch open on Slot(%s)\n", p_slot->name); 91 ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
90 event_type = INT_SWITCH_OPEN; 92 event_type = INT_SWITCH_OPEN;
91 } else { 93 } else {
92 /* 94 /*
93 * Switch closed 95 * Switch closed
94 */ 96 */
95 info("Latch close on Slot(%s)\n", p_slot->name); 97 ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
96 event_type = INT_SWITCH_CLOSE; 98 event_type = INT_SWITCH_CLOSE;
97 } 99 }
98 100
@@ -105,9 +107,10 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
105{ 107{
106 u32 event_type; 108 u32 event_type;
107 u8 presence_save; 109 u8 presence_save;
110 struct controller *ctrl = p_slot->ctrl;
108 111
109 /* Presence Change */ 112 /* Presence Change */
110 dbg("pciehp: Presence/Notify input change.\n"); 113 ctrl_dbg(ctrl, "Presence/Notify input change\n");
111 114
112 /* Switch is open, assume a presence change 115 /* Switch is open, assume a presence change
113 * Save the presence state 116 * Save the presence state
@@ -117,13 +120,14 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
117 /* 120 /*
118 * Card Present 121 * Card Present
119 */ 122 */
120 info("Card present on Slot(%s)\n", p_slot->name); 123 ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
121 event_type = INT_PRESENCE_ON; 124 event_type = INT_PRESENCE_ON;
122 } else { 125 } else {
123 /* 126 /*
124 * Not Present 127 * Not Present
125 */ 128 */
126 info("Card not present on Slot(%s)\n", p_slot->name); 129 ctrl_info(ctrl, "Card not present on Slot(%s)\n",
130 slot_name(p_slot));
127 event_type = INT_PRESENCE_OFF; 131 event_type = INT_PRESENCE_OFF;
128 } 132 }
129 133
@@ -135,23 +139,25 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
135u8 pciehp_handle_power_fault(struct slot *p_slot) 139u8 pciehp_handle_power_fault(struct slot *p_slot)
136{ 140{
137 u32 event_type; 141 u32 event_type;
142 struct controller *ctrl = p_slot->ctrl;
138 143
139 /* power fault */ 144 /* power fault */
140 dbg("pciehp: Power fault interrupt received.\n"); 145 ctrl_dbg(ctrl, "Power fault interrupt received\n");
141 146
142 if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { 147 if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
143 /* 148 /*
144 * power fault Cleared 149 * power fault Cleared
145 */ 150 */
146 info("Power fault cleared on Slot(%s)\n", p_slot->name); 151 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
152 slot_name(p_slot));
147 event_type = INT_POWER_FAULT_CLEAR; 153 event_type = INT_POWER_FAULT_CLEAR;
148 } else { 154 } else {
149 /* 155 /*
150 * power fault 156 * power fault
151 */ 157 */
152 info("Power fault on Slot(%s)\n", p_slot->name); 158 ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
153 event_type = INT_POWER_FAULT; 159 event_type = INT_POWER_FAULT;
154 info("power fault bit %x set\n", 0); 160 ctrl_info(ctrl, "Power fault bit %x set\n", 0);
155 } 161 }
156 162
157 queue_interrupt_event(p_slot, event_type); 163 queue_interrupt_event(p_slot, event_type);
@@ -168,8 +174,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
168 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 174 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
169 if (POWER_CTRL(ctrl)) { 175 if (POWER_CTRL(ctrl)) {
170 if (pslot->hpc_ops->power_off_slot(pslot)) { 176 if (pslot->hpc_ops->power_off_slot(pslot)) {
171 err("%s: Issue of Slot Power Off command failed\n", 177 ctrl_err(ctrl,
172 __func__); 178 "Issue of Slot Power Off command failed\n");
173 return; 179 return;
174 } 180 }
175 } 181 }
@@ -186,8 +192,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
186 192
187 if (ATTN_LED(ctrl)) { 193 if (ATTN_LED(ctrl)) {
188 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 194 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
189 err("%s: Issue of Set Attention Led command failed\n", 195 ctrl_err(ctrl,
190 __func__); 196 "Issue of Set Attention Led command failed\n");
191 return; 197 return;
192 } 198 }
193 } 199 }
@@ -204,10 +210,11 @@ static int board_added(struct slot *p_slot)
204{ 210{
205 int retval = 0; 211 int retval = 0;
206 struct controller *ctrl = p_slot->ctrl; 212 struct controller *ctrl = p_slot->ctrl;
213 struct pci_bus *parent = ctrl->pci_dev->subordinate;
207 214
208 dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n", 215 ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n",
209 __func__, p_slot->device, 216 __func__, p_slot->device, ctrl->slot_device_offset,
210 ctrl->slot_device_offset, p_slot->hp_slot); 217 p_slot->hp_slot);
211 218
212 if (POWER_CTRL(ctrl)) { 219 if (POWER_CTRL(ctrl)) {
213 /* Power on slot */ 220 /* Power on slot */
@@ -219,28 +226,25 @@ static int board_added(struct slot *p_slot)
219 if (PWR_LED(ctrl)) 226 if (PWR_LED(ctrl))
220 p_slot->hpc_ops->green_led_blink(p_slot); 227 p_slot->hpc_ops->green_led_blink(p_slot);
221 228
222 /* Wait for ~1 second */
223 msleep(1000);
224
225 /* Check link training status */ 229 /* Check link training status */
226 retval = p_slot->hpc_ops->check_lnk_status(ctrl); 230 retval = p_slot->hpc_ops->check_lnk_status(ctrl);
227 if (retval) { 231 if (retval) {
228 err("%s: Failed to check link status\n", __func__); 232 ctrl_err(ctrl, "Failed to check link status\n");
229 set_slot_off(ctrl, p_slot); 233 set_slot_off(ctrl, p_slot);
230 return retval; 234 return retval;
231 } 235 }
232 236
233 /* Check for a power fault */ 237 /* Check for a power fault */
234 if (p_slot->hpc_ops->query_power_fault(p_slot)) { 238 if (p_slot->hpc_ops->query_power_fault(p_slot)) {
235 dbg("%s: power fault detected\n", __func__); 239 ctrl_dbg(ctrl, "Power fault detected\n");
236 retval = POWER_FAILURE; 240 retval = POWER_FAILURE;
237 goto err_exit; 241 goto err_exit;
238 } 242 }
239 243
240 retval = pciehp_configure_device(p_slot); 244 retval = pciehp_configure_device(p_slot);
241 if (retval) { 245 if (retval) {
242 err("Cannot add device 0x%x:%x\n", p_slot->bus, 246 ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
243 p_slot->device); 247 pci_domain_nr(parent), p_slot->bus, p_slot->device);
244 goto err_exit; 248 goto err_exit;
245 } 249 }
246 250
@@ -272,14 +276,14 @@ static int remove_board(struct slot *p_slot)
272 if (retval) 276 if (retval)
273 return retval; 277 return retval;
274 278
275 dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); 279 ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot);
276 280
277 if (POWER_CTRL(ctrl)) { 281 if (POWER_CTRL(ctrl)) {
278 /* power off slot */ 282 /* power off slot */
279 retval = p_slot->hpc_ops->power_off_slot(p_slot); 283 retval = p_slot->hpc_ops->power_off_slot(p_slot);
280 if (retval) { 284 if (retval) {
281 err("%s: Issue of Slot Disable command failed\n", 285 ctrl_err(ctrl,
282 __func__); 286 "Issue of Slot Disable command failed\n");
283 return retval; 287 return retval;
284 } 288 }
285 } 289 }
@@ -320,8 +324,10 @@ static void pciehp_power_thread(struct work_struct *work)
320 switch (p_slot->state) { 324 switch (p_slot->state) {
321 case POWEROFF_STATE: 325 case POWEROFF_STATE:
322 mutex_unlock(&p_slot->lock); 326 mutex_unlock(&p_slot->lock);
323 dbg("%s: disabling bus:device(%x:%x)\n", 327 ctrl_dbg(p_slot->ctrl,
324 __func__, p_slot->bus, p_slot->device); 328 "Disabling domain:bus:device=%04x:%02x:%02x\n",
329 pci_domain_nr(p_slot->ctrl->pci_dev->subordinate),
330 p_slot->bus, p_slot->device);
325 pciehp_disable_slot(p_slot); 331 pciehp_disable_slot(p_slot);
326 mutex_lock(&p_slot->lock); 332 mutex_lock(&p_slot->lock);
327 p_slot->state = STATIC_STATE; 333 p_slot->state = STATIC_STATE;
@@ -349,7 +355,8 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
349 355
350 info = kmalloc(sizeof(*info), GFP_KERNEL); 356 info = kmalloc(sizeof(*info), GFP_KERNEL);
351 if (!info) { 357 if (!info) {
352 err("%s: Cannot allocate memory\n", __func__); 358 ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
359 __func__);
353 return; 360 return;
354 } 361 }
355 info->p_slot = p_slot; 362 info->p_slot = p_slot;
@@ -403,12 +410,14 @@ static void handle_button_press_event(struct slot *p_slot)
403 p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 410 p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
404 if (getstatus) { 411 if (getstatus) {
405 p_slot->state = BLINKINGOFF_STATE; 412 p_slot->state = BLINKINGOFF_STATE;
406 info("PCI slot #%s - powering off due to button " 413 ctrl_info(ctrl,
407 "press.\n", p_slot->name); 414 "PCI slot #%s - powering off due to button "
415 "press.\n", slot_name(p_slot));
408 } else { 416 } else {
409 p_slot->state = BLINKINGON_STATE; 417 p_slot->state = BLINKINGON_STATE;
410 info("PCI slot #%s - powering on due to button " 418 ctrl_info(ctrl,
411 "press.\n", p_slot->name); 419 "PCI slot #%s - powering on due to button "
420 "press.\n", slot_name(p_slot));
412 } 421 }
413 /* blink green LED and turn off amber */ 422 /* blink green LED and turn off amber */
414 if (PWR_LED(ctrl)) 423 if (PWR_LED(ctrl))
@@ -425,8 +434,7 @@ static void handle_button_press_event(struct slot *p_slot)
425 * press the attention again before the 5 sec. limit 434 * press the attention again before the 5 sec. limit
426 * expires to cancel hot-add or hot-remove 435 * expires to cancel hot-add or hot-remove
427 */ 436 */
428 info("Button cancel on Slot(%s)\n", p_slot->name); 437 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
429 dbg("%s: button cancel\n", __func__);
430 cancel_delayed_work(&p_slot->work); 438 cancel_delayed_work(&p_slot->work);
431 if (p_slot->state == BLINKINGOFF_STATE) { 439 if (p_slot->state == BLINKINGOFF_STATE) {
432 if (PWR_LED(ctrl)) 440 if (PWR_LED(ctrl))
@@ -437,8 +445,8 @@ static void handle_button_press_event(struct slot *p_slot)
437 } 445 }
438 if (ATTN_LED(ctrl)) 446 if (ATTN_LED(ctrl))
439 p_slot->hpc_ops->set_attention_status(p_slot, 0); 447 p_slot->hpc_ops->set_attention_status(p_slot, 0);
440 info("PCI slot #%s - action canceled due to button press\n", 448 ctrl_info(ctrl, "PCI slot #%s - action canceled "
441 p_slot->name); 449 "due to button press\n", slot_name(p_slot));
442 p_slot->state = STATIC_STATE; 450 p_slot->state = STATIC_STATE;
443 break; 451 break;
444 case POWEROFF_STATE: 452 case POWEROFF_STATE:
@@ -448,11 +456,11 @@ static void handle_button_press_event(struct slot *p_slot)
448 * this means that the previous attention button action 456 * this means that the previous attention button action
449 * to hot-add or hot-remove is undergoing 457 * to hot-add or hot-remove is undergoing
450 */ 458 */
451 info("Button ignore on Slot(%s)\n", p_slot->name); 459 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
452 update_slot_info(p_slot); 460 update_slot_info(p_slot);
453 break; 461 break;
454 default: 462 default:
455 warn("Not a valid state\n"); 463 ctrl_warn(ctrl, "Not a valid state\n");
456 break; 464 break;
457 } 465 }
458} 466}
@@ -467,7 +475,8 @@ static void handle_surprise_event(struct slot *p_slot)
467 475
468 info = kmalloc(sizeof(*info), GFP_KERNEL); 476 info = kmalloc(sizeof(*info), GFP_KERNEL);
469 if (!info) { 477 if (!info) {
470 err("%s: Cannot allocate memory\n", __func__); 478 ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
479 __func__);
471 return; 480 return;
472 } 481 }
473 info->p_slot = p_slot; 482 info->p_slot = p_slot;
@@ -505,7 +514,7 @@ static void interrupt_event_handler(struct work_struct *work)
505 case INT_PRESENCE_OFF: 514 case INT_PRESENCE_OFF:
506 if (!HP_SUPR_RM(ctrl)) 515 if (!HP_SUPR_RM(ctrl))
507 break; 516 break;
508 dbg("Surprise Removal\n"); 517 ctrl_dbg(ctrl, "Surprise Removal\n");
509 update_slot_info(p_slot); 518 update_slot_info(p_slot);
510 handle_surprise_event(p_slot); 519 handle_surprise_event(p_slot);
511 break; 520 break;
@@ -522,22 +531,22 @@ int pciehp_enable_slot(struct slot *p_slot)
522{ 531{
523 u8 getstatus = 0; 532 u8 getstatus = 0;
524 int rc; 533 int rc;
534 struct controller *ctrl = p_slot->ctrl;
525 535
526 /* Check to see if (latch closed, card present, power off) */ 536 /* Check to see if (latch closed, card present, power off) */
527 mutex_lock(&p_slot->ctrl->crit_sect); 537 mutex_lock(&p_slot->ctrl->crit_sect);
528 538
529 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 539 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
530 if (rc || !getstatus) { 540 if (rc || !getstatus) {
531 info("%s: no adapter on slot(%s)\n", __func__, 541 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
532 p_slot->name);
533 mutex_unlock(&p_slot->ctrl->crit_sect); 542 mutex_unlock(&p_slot->ctrl->crit_sect);
534 return -ENODEV; 543 return -ENODEV;
535 } 544 }
536 if (MRL_SENS(p_slot->ctrl)) { 545 if (MRL_SENS(p_slot->ctrl)) {
537 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 546 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
538 if (rc || getstatus) { 547 if (rc || getstatus) {
539 info("%s: latch open on slot(%s)\n", __func__, 548 ctrl_info(ctrl, "Latch open on slot(%s)\n",
540 p_slot->name); 549 slot_name(p_slot));
541 mutex_unlock(&p_slot->ctrl->crit_sect); 550 mutex_unlock(&p_slot->ctrl->crit_sect);
542 return -ENODEV; 551 return -ENODEV;
543 } 552 }
@@ -546,8 +555,8 @@ int pciehp_enable_slot(struct slot *p_slot)
546 if (POWER_CTRL(p_slot->ctrl)) { 555 if (POWER_CTRL(p_slot->ctrl)) {
547 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 556 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
548 if (rc || getstatus) { 557 if (rc || getstatus) {
549 info("%s: already enabled on slot(%s)\n", __func__, 558 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
550 p_slot->name); 559 slot_name(p_slot));
551 mutex_unlock(&p_slot->ctrl->crit_sect); 560 mutex_unlock(&p_slot->ctrl->crit_sect);
552 return -EINVAL; 561 return -EINVAL;
553 } 562 }
@@ -571,6 +580,7 @@ int pciehp_disable_slot(struct slot *p_slot)
571{ 580{
572 u8 getstatus = 0; 581 u8 getstatus = 0;
573 int ret = 0; 582 int ret = 0;
583 struct controller *ctrl = p_slot->ctrl;
574 584
575 if (!p_slot->ctrl) 585 if (!p_slot->ctrl)
576 return 1; 586 return 1;
@@ -581,8 +591,8 @@ int pciehp_disable_slot(struct slot *p_slot)
581 if (!HP_SUPR_RM(p_slot->ctrl)) { 591 if (!HP_SUPR_RM(p_slot->ctrl)) {
582 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 592 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
583 if (ret || !getstatus) { 593 if (ret || !getstatus) {
584 info("%s: no adapter on slot(%s)\n", __func__, 594 ctrl_info(ctrl, "No adapter on slot(%s)\n",
585 p_slot->name); 595 slot_name(p_slot));
586 mutex_unlock(&p_slot->ctrl->crit_sect); 596 mutex_unlock(&p_slot->ctrl->crit_sect);
587 return -ENODEV; 597 return -ENODEV;
588 } 598 }
@@ -591,8 +601,8 @@ int pciehp_disable_slot(struct slot *p_slot)
591 if (MRL_SENS(p_slot->ctrl)) { 601 if (MRL_SENS(p_slot->ctrl)) {
592 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 602 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
593 if (ret || getstatus) { 603 if (ret || getstatus) {
594 info("%s: latch open on slot(%s)\n", __func__, 604 ctrl_info(ctrl, "Latch open on slot(%s)\n",
595 p_slot->name); 605 slot_name(p_slot));
596 mutex_unlock(&p_slot->ctrl->crit_sect); 606 mutex_unlock(&p_slot->ctrl->crit_sect);
597 return -ENODEV; 607 return -ENODEV;
598 } 608 }
@@ -601,8 +611,8 @@ int pciehp_disable_slot(struct slot *p_slot)
601 if (POWER_CTRL(p_slot->ctrl)) { 611 if (POWER_CTRL(p_slot->ctrl)) {
602 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 612 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
603 if (ret || !getstatus) { 613 if (ret || !getstatus) {
604 info("%s: already disabled slot(%s)\n", __func__, 614 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
605 p_slot->name); 615 slot_name(p_slot));
606 mutex_unlock(&p_slot->ctrl->crit_sect); 616 mutex_unlock(&p_slot->ctrl->crit_sect);
607 return -EINVAL; 617 return -EINVAL;
608 } 618 }
@@ -618,6 +628,7 @@ int pciehp_disable_slot(struct slot *p_slot)
618int pciehp_sysfs_enable_slot(struct slot *p_slot) 628int pciehp_sysfs_enable_slot(struct slot *p_slot)
619{ 629{
620 int retval = -ENODEV; 630 int retval = -ENODEV;
631 struct controller *ctrl = p_slot->ctrl;
621 632
622 mutex_lock(&p_slot->lock); 633 mutex_lock(&p_slot->lock);
623 switch (p_slot->state) { 634 switch (p_slot->state) {
@@ -631,15 +642,17 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
631 p_slot->state = STATIC_STATE; 642 p_slot->state = STATIC_STATE;
632 break; 643 break;
633 case POWERON_STATE: 644 case POWERON_STATE:
634 info("Slot %s is already in powering on state\n", 645 ctrl_info(ctrl, "Slot %s is already in powering on state\n",
635 p_slot->name); 646 slot_name(p_slot));
636 break; 647 break;
637 case BLINKINGOFF_STATE: 648 case BLINKINGOFF_STATE:
638 case POWEROFF_STATE: 649 case POWEROFF_STATE:
639 info("Already enabled on slot %s\n", p_slot->name); 650 ctrl_info(ctrl, "Already enabled on slot %s\n",
651 slot_name(p_slot));
640 break; 652 break;
641 default: 653 default:
642 err("Not a valid state on slot %s\n", p_slot->name); 654 ctrl_err(ctrl, "Not a valid state on slot %s\n",
655 slot_name(p_slot));
643 break; 656 break;
644 } 657 }
645 mutex_unlock(&p_slot->lock); 658 mutex_unlock(&p_slot->lock);
@@ -650,6 +663,7 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
650int pciehp_sysfs_disable_slot(struct slot *p_slot) 663int pciehp_sysfs_disable_slot(struct slot *p_slot)
651{ 664{
652 int retval = -ENODEV; 665 int retval = -ENODEV;
666 struct controller *ctrl = p_slot->ctrl;
653 667
654 mutex_lock(&p_slot->lock); 668 mutex_lock(&p_slot->lock);
655 switch (p_slot->state) { 669 switch (p_slot->state) {
@@ -663,15 +677,17 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
663 p_slot->state = STATIC_STATE; 677 p_slot->state = STATIC_STATE;
664 break; 678 break;
665 case POWEROFF_STATE: 679 case POWEROFF_STATE:
666 info("Slot %s is already in powering off state\n", 680 ctrl_info(ctrl, "Slot %s is already in powering off state\n",
667 p_slot->name); 681 slot_name(p_slot));
668 break; 682 break;
669 case BLINKINGON_STATE: 683 case BLINKINGON_STATE:
670 case POWERON_STATE: 684 case POWERON_STATE:
671 info("Already disabled on slot %s\n", p_slot->name); 685 ctrl_info(ctrl, "Already disabled on slot %s\n",
686 slot_name(p_slot));
672 break; 687 break;
673 default: 688 default:
674 err("Not a valid state on slot %s\n", p_slot->name); 689 ctrl_err(ctrl, "Not a valid state on slot %s\n",
690 slot_name(p_slot));
675 break; 691 break;
676 } 692 }
677 mutex_unlock(&p_slot->lock); 693 mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1323a43285d7..b643ca13e4f1 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -125,6 +125,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
125/* Field definitions in Link Capabilities Register */ 125/* Field definitions in Link Capabilities Register */
126#define MAX_LNK_SPEED 0x000F 126#define MAX_LNK_SPEED 0x000F
127#define MAX_LNK_WIDTH 0x03F0 127#define MAX_LNK_WIDTH 0x03F0
128#define LINK_ACTIVE_REPORTING 0x00100000
128 129
129/* Link Width Encoding */ 130/* Link Width Encoding */
130#define LNK_X1 0x01 131#define LNK_X1 0x01
@@ -141,6 +142,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
141#define LNK_TRN_ERR 0x0400 142#define LNK_TRN_ERR 0x0400
142#define LNK_TRN 0x0800 143#define LNK_TRN 0x0800
143#define SLOT_CLK_CONF 0x1000 144#define SLOT_CLK_CONF 0x1000
145#define LINK_ACTIVE 0x2000
144 146
145/* Field definitions in Slot Capabilities Register */ 147/* Field definitions in Slot Capabilities Register */
146#define ATTN_BUTTN_PRSN 0x00000001 148#define ATTN_BUTTN_PRSN 0x00000001
@@ -223,7 +225,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
223 225
224static inline int pciehp_request_irq(struct controller *ctrl) 226static inline int pciehp_request_irq(struct controller *ctrl)
225{ 227{
226 int retval, irq = ctrl->pci_dev->irq; 228 int retval, irq = ctrl->pcie->irq;
227 229
228 /* Install interrupt polling timer. Start with 10 sec delay */ 230 /* Install interrupt polling timer. Start with 10 sec delay */
229 if (pciehp_poll_mode) { 231 if (pciehp_poll_mode) {
@@ -235,7 +237,8 @@ static inline int pciehp_request_irq(struct controller *ctrl)
235 /* Installs the interrupt handler */ 237 /* Installs the interrupt handler */
236 retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); 238 retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
237 if (retval) 239 if (retval)
238 err("Cannot get irq %d for the hotplug controller\n", irq); 240 ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
241 irq);
239 return retval; 242 return retval;
240} 243}
241 244
@@ -244,7 +247,7 @@ static inline void pciehp_free_irq(struct controller *ctrl)
244 if (pciehp_poll_mode) 247 if (pciehp_poll_mode)
245 del_timer_sync(&ctrl->poll_timer); 248 del_timer_sync(&ctrl->poll_timer);
246 else 249 else
247 free_irq(ctrl->pci_dev->irq, ctrl); 250 free_irq(ctrl->pcie->irq, ctrl);
248} 251}
249 252
250static int pcie_poll_cmd(struct controller *ctrl) 253static int pcie_poll_cmd(struct controller *ctrl)
@@ -258,7 +261,7 @@ static int pcie_poll_cmd(struct controller *ctrl)
258 return 1; 261 return 1;
259 } 262 }
260 } 263 }
261 while (timeout > 1000) { 264 while (timeout > 0) {
262 msleep(10); 265 msleep(10);
263 timeout -= 10; 266 timeout -= 10;
264 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { 267 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
@@ -282,7 +285,7 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
282 else 285 else
283 rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); 286 rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
284 if (!rc) 287 if (!rc)
285 dbg("Command not completed in 1000 msec\n"); 288 ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
286} 289}
287 290
288/** 291/**
@@ -301,7 +304,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
301 304
302 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 305 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
303 if (retval) { 306 if (retval) {
304 err("%s: Cannot read SLOTSTATUS register\n", __func__); 307 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
308 __func__);
305 goto out; 309 goto out;
306 } 310 }
307 311
@@ -312,26 +316,25 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
312 * proceed forward to issue the next command according 316 * proceed forward to issue the next command according
313 * to spec. Just print out the error message. 317 * to spec. Just print out the error message.
314 */ 318 */
315 dbg("%s: CMD_COMPLETED not clear after 1 sec.\n", 319 ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
316 __func__);
317 } else if (!NO_CMD_CMPL(ctrl)) { 320 } else if (!NO_CMD_CMPL(ctrl)) {
318 /* 321 /*
 319 * This controller seems to notify of command completed 322
320 * event even though it supports none of power 323 * event even though it supports none of power
321 * controller, attention led, power led and EMI. 324 * controller, attention led, power led and EMI.
322 */ 325 */
323 dbg("%s: Unexpected CMD_COMPLETED. Need to wait for " 326 ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to "
324 "command completed event.\n", __func__); 327 "wait for command completed event.\n");
325 ctrl->no_cmd_complete = 0; 328 ctrl->no_cmd_complete = 0;
326 } else { 329 } else {
327 dbg("%s: Unexpected CMD_COMPLETED. Maybe the " 330 ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe "
328 "controller is broken.\n", __func__); 331 "the controller is broken.\n");
329 } 332 }
330 } 333 }
331 334
332 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 335 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
333 if (retval) { 336 if (retval) {
334 err("%s: Cannot read SLOTCTRL register\n", __func__); 337 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
335 goto out; 338 goto out;
336 } 339 }
337 340
@@ -341,7 +344,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
341 smp_mb(); 344 smp_mb();
342 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 345 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
343 if (retval) 346 if (retval)
344 err("%s: Cannot write to SLOTCTRL register\n", __func__); 347 ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
345 348
346 /* 349 /*
347 * Wait for command completion. 350 * Wait for command completion.
@@ -363,21 +366,62 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
363 return retval; 366 return retval;
364} 367}
365 368
369static inline int check_link_active(struct controller *ctrl)
370{
371 u16 link_status;
372
373 if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
374 return 0;
375 return !!(link_status & LINK_ACTIVE);
376}
377
378static void pcie_wait_link_active(struct controller *ctrl)
379{
380 int timeout = 1000;
381
382 if (check_link_active(ctrl))
383 return;
384 while (timeout > 0) {
385 msleep(10);
386 timeout -= 10;
387 if (check_link_active(ctrl))
388 return;
389 }
390 ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
391}
392
366static int hpc_check_lnk_status(struct controller *ctrl) 393static int hpc_check_lnk_status(struct controller *ctrl)
367{ 394{
368 u16 lnk_status; 395 u16 lnk_status;
369 int retval = 0; 396 int retval = 0;
370 397
398 /*
399 * Data Link Layer Link Active Reporting must be capable for
400 * hot-plug capable downstream port. But old controller might
401 * not implement it. In this case, we wait for 1000 ms.
402 */
403 if (ctrl->link_active_reporting){
404 /* Wait for Data Link Layer Link Active bit to be set */
405 pcie_wait_link_active(ctrl);
406 /*
407 * We must wait for 100 ms after the Data Link Layer
408 * Link Active bit reads 1b before initiating a
409 * configuration access to the hot added device.
410 */
411 msleep(100);
412 } else
413 msleep(1000);
414
371 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 415 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
372 if (retval) { 416 if (retval) {
373 err("%s: Cannot read LNKSTATUS register\n", __func__); 417 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
374 return retval; 418 return retval;
375 } 419 }
376 420
377 dbg("%s: lnk_status = %x\n", __func__, lnk_status); 421 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
378 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || 422 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
379 !(lnk_status & NEG_LINK_WD)) { 423 !(lnk_status & NEG_LINK_WD)) {
380 err("%s : Link Training Error occurs \n", __func__); 424 ctrl_err(ctrl, "Link Training Error occurs \n");
381 retval = -1; 425 retval = -1;
382 return retval; 426 return retval;
383 } 427 }
@@ -394,12 +438,12 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
394 438
395 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 439 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
396 if (retval) { 440 if (retval) {
397 err("%s: Cannot read SLOTCTRL register\n", __func__); 441 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
398 return retval; 442 return retval;
399 } 443 }
400 444
401 dbg("%s: SLOTCTRL %x, value read %x\n", 445 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
402 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 446 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
403 447
404 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; 448 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
405 449
@@ -433,11 +477,11 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
433 477
434 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 478 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
435 if (retval) { 479 if (retval) {
436 err("%s: Cannot read SLOTCTRL register\n", __func__); 480 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
437 return retval; 481 return retval;
438 } 482 }
439 dbg("%s: SLOTCTRL %x value read %x\n", 483 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
440 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 484 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
441 485
442 pwr_state = (slot_ctrl & PWR_CTRL) >> 10; 486 pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
443 487
@@ -464,7 +508,8 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
464 508
465 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 509 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
466 if (retval) { 510 if (retval) {
467 err("%s: Cannot read SLOTSTATUS register\n", __func__); 511 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
512 __func__);
468 return retval; 513 return retval;
469 } 514 }
470 515
@@ -482,7 +527,8 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
482 527
483 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 528 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
484 if (retval) { 529 if (retval) {
485 err("%s: Cannot read SLOTSTATUS register\n", __func__); 530 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
531 __func__);
486 return retval; 532 return retval;
487 } 533 }
488 card_state = (u8)((slot_status & PRSN_STATE) >> 6); 534 card_state = (u8)((slot_status & PRSN_STATE) >> 6);
@@ -500,7 +546,7 @@ static int hpc_query_power_fault(struct slot *slot)
500 546
501 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 547 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
502 if (retval) { 548 if (retval) {
503 err("%s: Cannot check for power fault\n", __func__); 549 ctrl_err(ctrl, "Cannot check for power fault\n");
504 return retval; 550 return retval;
505 } 551 }
506 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 552 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
@@ -516,7 +562,7 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status)
516 562
517 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 563 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
518 if (retval) { 564 if (retval) {
519 err("%s : Cannot check EMI status\n", __func__); 565 ctrl_err(ctrl, "Cannot check EMI status\n");
520 return retval; 566 return retval;
521 } 567 }
522 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; 568 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
@@ -560,8 +606,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
560 return -1; 606 return -1;
561 } 607 }
562 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 608 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
563 dbg("%s: SLOTCTRL %x write cmd %x\n", 609 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
564 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 610 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
565 611
566 return rc; 612 return rc;
567} 613}
@@ -575,8 +621,8 @@ static void hpc_set_green_led_on(struct slot *slot)
575 slot_cmd = 0x0100; 621 slot_cmd = 0x0100;
576 cmd_mask = PWR_LED_CTRL; 622 cmd_mask = PWR_LED_CTRL;
577 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 623 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
578 dbg("%s: SLOTCTRL %x write cmd %x\n", 624 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
579 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 625 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
580} 626}
581 627
582static void hpc_set_green_led_off(struct slot *slot) 628static void hpc_set_green_led_off(struct slot *slot)
@@ -588,8 +634,8 @@ static void hpc_set_green_led_off(struct slot *slot)
588 slot_cmd = 0x0300; 634 slot_cmd = 0x0300;
589 cmd_mask = PWR_LED_CTRL; 635 cmd_mask = PWR_LED_CTRL;
590 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 636 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
591 dbg("%s: SLOTCTRL %x write cmd %x\n", 637 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
592 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 638 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
593} 639}
594 640
595static void hpc_set_green_led_blink(struct slot *slot) 641static void hpc_set_green_led_blink(struct slot *slot)
@@ -601,8 +647,8 @@ static void hpc_set_green_led_blink(struct slot *slot)
601 slot_cmd = 0x0200; 647 slot_cmd = 0x0200;
602 cmd_mask = PWR_LED_CTRL; 648 cmd_mask = PWR_LED_CTRL;
603 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 649 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
604 dbg("%s: SLOTCTRL %x write cmd %x\n", 650 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
605 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 651 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
606} 652}
607 653
608static int hpc_power_on_slot(struct slot * slot) 654static int hpc_power_on_slot(struct slot * slot)
@@ -613,20 +659,22 @@ static int hpc_power_on_slot(struct slot * slot)
613 u16 slot_status; 659 u16 slot_status;
614 int retval = 0; 660 int retval = 0;
615 661
616 dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); 662 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
617 663
618 /* Clear sticky power-fault bit from previous power failures */ 664 /* Clear sticky power-fault bit from previous power failures */
619 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 665 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
620 if (retval) { 666 if (retval) {
621 err("%s: Cannot read SLOTSTATUS register\n", __func__); 667 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
668 __func__);
622 return retval; 669 return retval;
623 } 670 }
624 slot_status &= PWR_FAULT_DETECTED; 671 slot_status &= PWR_FAULT_DETECTED;
625 if (slot_status) { 672 if (slot_status) {
626 retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); 673 retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
627 if (retval) { 674 if (retval) {
628 err("%s: Cannot write to SLOTSTATUS register\n", 675 ctrl_err(ctrl,
629 __func__); 676 "%s: Cannot write to SLOTSTATUS register\n",
677 __func__);
630 return retval; 678 return retval;
631 } 679 }
632 } 680 }
@@ -644,11 +692,11 @@ static int hpc_power_on_slot(struct slot * slot)
644 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 692 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
645 693
646 if (retval) { 694 if (retval) {
647 err("%s: Write %x command failed!\n", __func__, slot_cmd); 695 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
648 return -1; 696 return -1;
649 } 697 }
650 dbg("%s: SLOTCTRL %x write cmd %x\n", 698 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
651 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 699 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
652 700
653 return retval; 701 return retval;
654} 702}
@@ -694,7 +742,7 @@ static int hpc_power_off_slot(struct slot * slot)
694 int retval = 0; 742 int retval = 0;
695 int changed; 743 int changed;
696 744
697 dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); 745 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
698 746
699 /* 747 /*
700 * Set Bad DLLP Mask bit in Correctable Error Mask 748 * Set Bad DLLP Mask bit in Correctable Error Mask
@@ -722,12 +770,12 @@ static int hpc_power_off_slot(struct slot * slot)
722 770
723 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 771 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
724 if (retval) { 772 if (retval) {
725 err("%s: Write command failed!\n", __func__); 773 ctrl_err(ctrl, "Write command failed!\n");
726 retval = -1; 774 retval = -1;
727 goto out; 775 goto out;
728 } 776 }
729 dbg("%s: SLOTCTRL %x write cmd %x\n", 777 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
730 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 778 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
731 out: 779 out:
732 if (changed) 780 if (changed)
733 pcie_unmask_bad_dllp(ctrl); 781 pcie_unmask_bad_dllp(ctrl);
@@ -749,7 +797,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
749 intr_loc = 0; 797 intr_loc = 0;
750 do { 798 do {
751 if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { 799 if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
752 err("%s: Cannot read SLOTSTATUS\n", __func__); 800 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
801 __func__);
753 return IRQ_NONE; 802 return IRQ_NONE;
754 } 803 }
755 804
@@ -760,12 +809,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
760 if (!intr_loc) 809 if (!intr_loc)
761 return IRQ_NONE; 810 return IRQ_NONE;
762 if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { 811 if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
763 err("%s: Cannot write to SLOTSTATUS\n", __func__); 812 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
813 __func__);
764 return IRQ_NONE; 814 return IRQ_NONE;
765 } 815 }
766 } while (detected); 816 } while (detected);
767 817
768 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); 818 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
769 819
770 /* Check Command Complete Interrupt Pending */ 820 /* Check Command Complete Interrupt Pending */
771 if (intr_loc & CMD_COMPLETED) { 821 if (intr_loc & CMD_COMPLETED) {
@@ -807,7 +857,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
807 857
808 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 858 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
809 if (retval) { 859 if (retval) {
810 err("%s: Cannot read LNKCAP register\n", __func__); 860 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
811 return retval; 861 return retval;
812 } 862 }
813 863
@@ -821,7 +871,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
821 } 871 }
822 872
823 *value = lnk_speed; 873 *value = lnk_speed;
824 dbg("Max link speed = %d\n", lnk_speed); 874 ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed);
825 875
826 return retval; 876 return retval;
827} 877}
@@ -836,7 +886,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
836 886
837 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 887 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
838 if (retval) { 888 if (retval) {
839 err("%s: Cannot read LNKCAP register\n", __func__); 889 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
840 return retval; 890 return retval;
841 } 891 }
842 892
@@ -871,7 +921,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
871 } 921 }
872 922
873 *value = lnk_wdth; 923 *value = lnk_wdth;
874 dbg("Max link width = %d\n", lnk_wdth); 924 ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth);
875 925
876 return retval; 926 return retval;
877} 927}
@@ -885,7 +935,8 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
885 935
886 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 936 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
887 if (retval) { 937 if (retval) {
888 err("%s: Cannot read LNKSTATUS register\n", __func__); 938 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
939 __func__);
889 return retval; 940 return retval;
890 } 941 }
891 942
@@ -899,7 +950,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
899 } 950 }
900 951
901 *value = lnk_speed; 952 *value = lnk_speed;
902 dbg("Current link speed = %d\n", lnk_speed); 953 ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed);
903 954
904 return retval; 955 return retval;
905} 956}
@@ -914,7 +965,8 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
914 965
915 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 966 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
916 if (retval) { 967 if (retval) {
917 err("%s: Cannot read LNKSTATUS register\n", __func__); 968 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
969 __func__);
918 return retval; 970 return retval;
919 } 971 }
920 972
@@ -949,7 +1001,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
949 } 1001 }
950 1002
951 *value = lnk_wdth; 1003 *value = lnk_wdth;
952 dbg("Current link width = %d\n", lnk_wdth); 1004 ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth);
953 1005
954 return retval; 1006 return retval;
955} 1007}
@@ -998,7 +1050,7 @@ int pcie_enable_notification(struct controller *ctrl)
998 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1050 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
999 1051
1000 if (pcie_write_cmd(ctrl, cmd, mask)) { 1052 if (pcie_write_cmd(ctrl, cmd, mask)) {
1001 err("%s: Cannot enable software notification\n", __func__); 1053 ctrl_err(ctrl, "Cannot enable software notification\n");
1002 return -1; 1054 return -1;
1003 } 1055 }
1004 return 0; 1056 return 0;
@@ -1010,7 +1062,7 @@ static void pcie_disable_notification(struct controller *ctrl)
1010 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | 1062 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1011 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1063 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1012 if (pcie_write_cmd(ctrl, 0, mask)) 1064 if (pcie_write_cmd(ctrl, 0, mask))
1013 warn("%s: Cannot disable software notification\n", __func__); 1065 ctrl_warn(ctrl, "Cannot disable software notification\n");
1014} 1066}
1015 1067
1016static int pcie_init_notification(struct controller *ctrl) 1068static int pcie_init_notification(struct controller *ctrl)
@@ -1030,15 +1082,6 @@ static void pcie_shutdown_notification(struct controller *ctrl)
1030 pciehp_free_irq(ctrl); 1082 pciehp_free_irq(ctrl);
1031} 1083}
1032 1084
1033static void make_slot_name(struct slot *slot)
1034{
1035 if (pciehp_slot_with_bus)
1036 snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
1037 slot->bus, slot->number);
1038 else
1039 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1040}
1041
1042static int pcie_init_slot(struct controller *ctrl) 1085static int pcie_init_slot(struct controller *ctrl)
1043{ 1086{
1044 struct slot *slot; 1087 struct slot *slot;
@@ -1053,7 +1096,6 @@ static int pcie_init_slot(struct controller *ctrl)
1053 slot->device = ctrl->slot_device_offset + slot->hp_slot; 1096 slot->device = ctrl->slot_device_offset + slot->hp_slot;
1054 slot->hpc_ops = ctrl->hpc_ops; 1097 slot->hpc_ops = ctrl->hpc_ops;
1055 slot->number = ctrl->first_slot; 1098 slot->number = ctrl->first_slot;
1056 make_slot_name(slot);
1057 mutex_init(&slot->lock); 1099 mutex_init(&slot->lock);
1058 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 1100 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
1059 list_add(&slot->slot_list, &ctrl->slot_list); 1101 list_add(&slot->slot_list, &ctrl->slot_list);
@@ -1080,58 +1122,70 @@ static inline void dbg_ctrl(struct controller *ctrl)
1080 if (!pciehp_debug) 1122 if (!pciehp_debug)
1081 return; 1123 return;
1082 1124
1083 dbg("Hotplug Controller:\n"); 1125 ctrl_info(ctrl, "Hotplug Controller:\n");
1084 dbg(" Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq); 1126 ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
1085 dbg(" Vendor ID : 0x%04x\n", pdev->vendor); 1127 pci_name(pdev), pdev->irq);
1086 dbg(" Device ID : 0x%04x\n", pdev->device); 1128 ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
1087 dbg(" Subsystem ID : 0x%04x\n", pdev->subsystem_device); 1129 ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
1088 dbg(" Subsystem Vendor ID : 0x%04x\n", pdev->subsystem_vendor); 1130 ctrl_info(ctrl, " Subsystem ID : 0x%04x\n",
1089 dbg(" PCIe Cap offset : 0x%02x\n", ctrl->cap_base); 1131 pdev->subsystem_device);
1132 ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
1133 pdev->subsystem_vendor);
1134 ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base);
1090 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1135 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1091 if (!pci_resource_len(pdev, i)) 1136 if (!pci_resource_len(pdev, i))
1092 continue; 1137 continue;
1093 dbg(" PCI resource [%d] : 0x%llx@0x%llx\n", i, 1138 ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n",
1094 (unsigned long long)pci_resource_len(pdev, i), 1139 i, (unsigned long long)pci_resource_len(pdev, i),
1095 (unsigned long long)pci_resource_start(pdev, i)); 1140 (unsigned long long)pci_resource_start(pdev, i));
1096 } 1141 }
1097 dbg("Slot Capabilities : 0x%08x\n", ctrl->slot_cap); 1142 ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
1098 dbg(" Physical Slot Number : %d\n", ctrl->first_slot); 1143 ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot);
1099 dbg(" Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no"); 1144 ctrl_info(ctrl, " Attention Button : %3s\n",
1100 dbg(" Power Controller : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no"); 1145 ATTN_BUTTN(ctrl) ? "yes" : "no");
1101 dbg(" MRL Sensor : %3s\n", MRL_SENS(ctrl) ? "yes" : "no"); 1146 ctrl_info(ctrl, " Power Controller : %3s\n",
1102 dbg(" Attention Indicator : %3s\n", ATTN_LED(ctrl) ? "yes" : "no"); 1147 POWER_CTRL(ctrl) ? "yes" : "no");
1103 dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); 1148 ctrl_info(ctrl, " MRL Sensor : %3s\n",
1104 dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); 1149 MRL_SENS(ctrl) ? "yes" : "no");
1105 dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); 1150 ctrl_info(ctrl, " Attention Indicator : %3s\n",
1106 dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); 1151 ATTN_LED(ctrl) ? "yes" : "no");
1152 ctrl_info(ctrl, " Power Indicator : %3s\n",
1153 PWR_LED(ctrl) ? "yes" : "no");
1154 ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
1155 HP_SUPR_RM(ctrl) ? "yes" : "no");
1156 ctrl_info(ctrl, " EMI Present : %3s\n",
1157 EMI(ctrl) ? "yes" : "no");
1158 ctrl_info(ctrl, " Command Completed : %3s\n",
1159 NO_CMD_CMPL(ctrl) ? "no" : "yes");
1107 pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1160 pciehp_readw(ctrl, SLOTSTATUS, &reg16);
1108 dbg("Slot Status : 0x%04x\n", reg16); 1161 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
1109 pciehp_readw(ctrl, SLOTCTRL, &reg16); 1162 pciehp_readw(ctrl, SLOTCTRL, &reg16);
1110 dbg("Slot Control : 0x%04x\n", reg16); 1163 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
1111} 1164}
1112 1165
1113struct controller *pcie_init(struct pcie_device *dev) 1166struct controller *pcie_init(struct pcie_device *dev)
1114{ 1167{
1115 struct controller *ctrl; 1168 struct controller *ctrl;
1116 u32 slot_cap; 1169 u32 slot_cap, link_cap;
1117 struct pci_dev *pdev = dev->port; 1170 struct pci_dev *pdev = dev->port;
1118 1171
1119 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1172 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1120 if (!ctrl) { 1173 if (!ctrl) {
1121 err("%s : out of memory\n", __func__); 1174 dev_err(&dev->device, "%s: Out of memory\n", __func__);
1122 goto abort; 1175 goto abort;
1123 } 1176 }
1124 INIT_LIST_HEAD(&ctrl->slot_list); 1177 INIT_LIST_HEAD(&ctrl->slot_list);
1125 1178
1179 ctrl->pcie = dev;
1126 ctrl->pci_dev = pdev; 1180 ctrl->pci_dev = pdev;
1127 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1181 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1128 if (!ctrl->cap_base) { 1182 if (!ctrl->cap_base) {
1129 err("%s: Cannot find PCI Express capability\n", __func__); 1183 ctrl_err(ctrl, "Cannot find PCI Express capability\n");
1130 goto abort; 1184 goto abort_ctrl;
1131 } 1185 }
1132 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { 1186 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
1133 err("%s: Cannot read SLOTCAP register\n", __func__); 1187 ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
1134 goto abort; 1188 goto abort_ctrl;
1135 } 1189 }
1136 1190
1137 ctrl->slot_cap = slot_cap; 1191 ctrl->slot_cap = slot_cap;
@@ -1153,6 +1207,16 @@ struct controller *pcie_init(struct pcie_device *dev)
1153 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) 1207 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
1154 ctrl->no_cmd_complete = 1; 1208 ctrl->no_cmd_complete = 1;
1155 1209
1210 /* Check if Data Link Layer Link Active Reporting is implemented */
1211 if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
1212 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
1213 goto abort_ctrl;
1214 }
1215 if (link_cap & LINK_ACTIVE_REPORTING) {
1216 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
1217 ctrl->link_active_reporting = 1;
1218 }
1219
1156 /* Clear all remaining event bits in Slot Status register */ 1220 /* Clear all remaining event bits in Slot Status register */
1157 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) 1221 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
1158 goto abort_ctrl; 1222 goto abort_ctrl;
@@ -1170,9 +1234,9 @@ struct controller *pcie_init(struct pcie_device *dev)
1170 goto abort_ctrl; 1234 goto abort_ctrl;
1171 } 1235 }
1172 1236
1173 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 1237 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1174 pdev->vendor, pdev->device, 1238 pdev->vendor, pdev->device, pdev->subsystem_vendor,
1175 pdev->subsystem_vendor, pdev->subsystem_device); 1239 pdev->subsystem_device);
1176 1240
1177 if (pcie_init_slot(ctrl)) 1241 if (pcie_init_slot(ctrl))
1178 goto abort_ctrl; 1242 goto abort_ctrl;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 6040dcceb256..10f9566cceeb 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -39,8 +39,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
39 u16 pci_cmd, pci_bctl; 39 u16 pci_cmd, pci_bctl;
40 40
41 if (hpp->revision > 1) { 41 if (hpp->revision > 1) {
42 printk(KERN_WARNING "%s: Rev.%d type0 record not supported\n", 42 warn("Rev.%d type0 record not supported\n", hpp->revision);
43 __func__, hpp->revision);
44 return; 43 return;
45 } 44 }
46 45
@@ -81,8 +80,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
81 u32 reg32; 80 u32 reg32;
82 81
83 if (hpp->revision > 1) { 82 if (hpp->revision > 1) {
84 printk(KERN_WARNING "%s: Rev.%d type2 record not supported\n", 83 warn("Rev.%d type2 record not supported\n", hpp->revision);
85 __func__, hpp->revision);
86 return; 84 return;
87 } 85 }
88 86
@@ -149,8 +147,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
149 return; 147 return;
150 148
151 if (pciehp_get_hp_params_from_firmware(dev, &hpp)) { 149 if (pciehp_get_hp_params_from_firmware(dev, &hpp)) {
152 printk(KERN_WARNING "%s: Could not get hotplug parameters\n", 150 warn("Could not get hotplug parameters\n");
153 __func__);
154 return; 151 return;
155 } 152 }
156 153
@@ -198,18 +195,20 @@ int pciehp_configure_device(struct slot *p_slot)
198 struct pci_dev *dev; 195 struct pci_dev *dev;
199 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 196 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
200 int num, fn; 197 int num, fn;
198 struct controller *ctrl = p_slot->ctrl;
201 199
202 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); 200 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
203 if (dev) { 201 if (dev) {
204 err("Device %s already exists at %x:%x, cannot hot-add\n", 202 ctrl_err(ctrl, "Device %s already exists "
205 pci_name(dev), p_slot->bus, p_slot->device); 203 "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
204 pci_domain_nr(parent), p_slot->bus, p_slot->device);
206 pci_dev_put(dev); 205 pci_dev_put(dev);
207 return -EINVAL; 206 return -EINVAL;
208 } 207 }
209 208
210 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); 209 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
211 if (num == 0) { 210 if (num == 0) {
212 err("No new device found\n"); 211 ctrl_err(ctrl, "No new device found\n");
213 return -ENODEV; 212 return -ENODEV;
214 } 213 }
215 214
@@ -218,8 +217,8 @@ int pciehp_configure_device(struct slot *p_slot)
218 if (!dev) 217 if (!dev)
219 continue; 218 continue;
220 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 219 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
221 err("Cannot hot-add display device %s\n", 220 ctrl_err(ctrl, "Cannot hot-add display device %s\n",
222 pci_name(dev)); 221 pci_name(dev));
223 pci_dev_put(dev); 222 pci_dev_put(dev);
224 continue; 223 continue;
225 } 224 }
@@ -244,9 +243,10 @@ int pciehp_unconfigure_device(struct slot *p_slot)
244 u8 presence = 0; 243 u8 presence = 0;
245 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 244 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
246 u16 command; 245 u16 command;
246 struct controller *ctrl = p_slot->ctrl;
247 247
248 dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, 248 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
249 p_slot->device); 249 __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
250 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); 250 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence);
251 if (ret) 251 if (ret)
252 presence = 0; 252 presence = 0;
@@ -257,16 +257,17 @@ int pciehp_unconfigure_device(struct slot *p_slot)
257 if (!temp) 257 if (!temp)
258 continue; 258 continue;
259 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 259 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
260 err("Cannot remove display device %s\n", 260 ctrl_err(ctrl, "Cannot remove display device %s\n",
261 pci_name(temp)); 261 pci_name(temp));
262 pci_dev_put(temp); 262 pci_dev_put(temp);
263 continue; 263 continue;
264 } 264 }
265 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { 265 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
266 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); 266 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
267 if (bctl & PCI_BRIDGE_CTL_VGA) { 267 if (bctl & PCI_BRIDGE_CTL_VGA) {
268 err("Cannot remove display device %s\n", 268 ctrl_err(ctrl,
269 pci_name(temp)); 269 "Cannot remove display device %s\n",
270 pci_name(temp));
270 pci_dev_put(temp); 271 pci_dev_put(temp);
271 continue; 272 continue;
272 } 273 }
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 7d5921b1ee78..419919a87b0f 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -46,10 +46,10 @@
46#define PRESENT 1 /* Card in slot */ 46#define PRESENT 1 /* Card in slot */
47 47
48#define MY_NAME "rpaphp" 48#define MY_NAME "rpaphp"
49extern int debug; 49extern int rpaphp_debug;
50#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
51 do { \ 51 do { \
52 if (debug) \ 52 if (rpaphp_debug) \
53 printk(KERN_DEBUG "%s: " format, \ 53 printk(KERN_DEBUG "%s: " format, \
54 MY_NAME , ## arg); \ 54 MY_NAME , ## arg); \
55 } while (0) 55 } while (0)
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 1f84f402acdb..95d02a08fdc7 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -37,7 +37,7 @@
37 /* and pci_do_scan_bus */ 37 /* and pci_do_scan_bus */
38#include "rpaphp.h" 38#include "rpaphp.h"
39 39
40int debug; 40int rpaphp_debug;
41LIST_HEAD(rpaphp_slot_head); 41LIST_HEAD(rpaphp_slot_head);
42 42
43#define DRIVER_VERSION "0.1" 43#define DRIVER_VERSION "0.1"
@@ -50,7 +50,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
50MODULE_DESCRIPTION(DRIVER_DESC); 50MODULE_DESCRIPTION(DRIVER_DESC);
51MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
52 52
53module_param(debug, bool, 0644); 53module_param_named(debug, rpaphp_debug, bool, 0644);
54 54
55/** 55/**
56 * set_attention_status - set attention LED 56 * set_attention_status - set attention LED
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 5acfd4f3d4cb..513e1e282391 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -123,7 +123,7 @@ int rpaphp_enable_slot(struct slot *slot)
123 slot->state = CONFIGURED; 123 slot->state = CONFIGURED;
124 } 124 }
125 125
126 if (debug) { 126 if (rpaphp_debug) {
127 struct pci_dev *dev; 127 struct pci_dev *dev;
128 dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); 128 dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name);
129 list_for_each_entry (dev, &bus->devices, bus_list) 129 list_for_each_entry (dev, &bus->devices, bus_list)
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 9b714ea93d20..2ea9cf1a8d02 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -43,7 +43,7 @@ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
43void dealloc_slot_struct(struct slot *slot) 43void dealloc_slot_struct(struct slot *slot)
44{ 44{
45 kfree(slot->hotplug_slot->info); 45 kfree(slot->hotplug_slot->info);
46 kfree(slot->hotplug_slot->name); 46 kfree(slot->name);
47 kfree(slot->hotplug_slot); 47 kfree(slot->hotplug_slot);
48 kfree(slot); 48 kfree(slot);
49} 49}
@@ -63,11 +63,9 @@ struct slot *alloc_slot_struct(struct device_node *dn,
63 GFP_KERNEL); 63 GFP_KERNEL);
64 if (!slot->hotplug_slot->info) 64 if (!slot->hotplug_slot->info)
65 goto error_hpslot; 65 goto error_hpslot;
66 slot->hotplug_slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL); 66 slot->name = kstrdup(drc_name, GFP_KERNEL);
67 if (!slot->hotplug_slot->name) 67 if (!slot->name)
68 goto error_info; 68 goto error_info;
69 slot->name = slot->hotplug_slot->name;
70 strcpy(slot->name, drc_name);
71 slot->dn = dn; 69 slot->dn = dn;
72 slot->index = drc_index; 70 slot->index = drc_index;
73 slot->power_domain = power_domain; 71 slot->power_domain = power_domain;
@@ -137,7 +135,7 @@ int rpaphp_register_slot(struct slot *slot)
137 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); 135 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
138 else 136 else
139 slotno = -1; 137 slotno = -1;
140 retval = pci_hp_register(php_slot, slot->bus, slotno); 138 retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
141 if (retval) { 139 if (retval) {
142 err("pci_hp_register failed with error %d\n", retval); 140 err("pci_hp_register failed with error %d\n", retval);
143 return retval; 141 return retval;
@@ -147,9 +145,5 @@ int rpaphp_register_slot(struct slot *slot)
147 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); 145 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head);
148 info("Slot [%s] registered\n", slot->name); 146 info("Slot [%s] registered\n", slot->name);
149 return 0; 147 return 0;
150
151sysfs_fail:
152 pci_hp_deregister(php_slot);
153 return retval;
154} 148}
155 149
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 410fe0394a8e..3eee70928d45 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -161,7 +161,8 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
161} 161}
162 162
163static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, 163static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
164 struct pci_bus *pci_bus, int device) 164 struct pci_bus *pci_bus, int device,
165 char *name)
165{ 166{
166 struct pcibus_info *pcibus_info; 167 struct pcibus_info *pcibus_info;
167 struct slot *slot; 168 struct slot *slot;
@@ -173,15 +174,9 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
173 return -ENOMEM; 174 return -ENOMEM;
174 bss_hotplug_slot->private = slot; 175 bss_hotplug_slot->private = slot;
175 176
176 bss_hotplug_slot->name = kmalloc(SN_SLOT_NAME_SIZE, GFP_KERNEL);
177 if (!bss_hotplug_slot->name) {
178 kfree(bss_hotplug_slot->private);
179 return -ENOMEM;
180 }
181
182 slot->device_num = device; 177 slot->device_num = device;
183 slot->pci_bus = pci_bus; 178 slot->pci_bus = pci_bus;
184 sprintf(bss_hotplug_slot->name, "%04x:%02x:%02x", 179 sprintf(name, "%04x:%02x:%02x",
185 pci_domain_nr(pci_bus), 180 pci_domain_nr(pci_bus),
186 ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum), 181 ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
187 device + 1); 182 device + 1);
@@ -418,7 +413,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
418 /* 413 /*
419 * Add the slot's devices to the ACPI infrastructure */ 414 * Add the slot's devices to the ACPI infrastructure */
420 if (SN_ACPI_BASE_SUPPORT() && ssdt) { 415 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
421 unsigned long adr; 416 unsigned long long adr;
422 struct acpi_device *pdevice; 417 struct acpi_device *pdevice;
423 struct acpi_device *device; 418 struct acpi_device *device;
424 acpi_handle phandle; 419 acpi_handle phandle;
@@ -510,7 +505,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
510 /* free the ACPI resources for the slot */ 505 /* free the ACPI resources for the slot */
511 if (SN_ACPI_BASE_SUPPORT() && 506 if (SN_ACPI_BASE_SUPPORT() &&
512 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { 507 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
513 unsigned long adr; 508 unsigned long long adr;
514 struct acpi_device *device; 509 struct acpi_device *device;
515 acpi_handle phandle; 510 acpi_handle phandle;
516 acpi_handle chandle = NULL; 511 acpi_handle chandle = NULL;
@@ -608,7 +603,6 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
608static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) 603static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
609{ 604{
610 kfree(bss_hotplug_slot->info); 605 kfree(bss_hotplug_slot->info);
611 kfree(bss_hotplug_slot->name);
612 kfree(bss_hotplug_slot->private); 606 kfree(bss_hotplug_slot->private);
613 kfree(bss_hotplug_slot); 607 kfree(bss_hotplug_slot);
614} 608}
@@ -618,6 +612,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
618 int device; 612 int device;
619 struct pci_slot *pci_slot; 613 struct pci_slot *pci_slot;
620 struct hotplug_slot *bss_hotplug_slot; 614 struct hotplug_slot *bss_hotplug_slot;
615 char name[SN_SLOT_NAME_SIZE];
621 int rc = 0; 616 int rc = 0;
622 617
623 /* 618 /*
@@ -645,15 +640,14 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
645 } 640 }
646 641
647 if (sn_hp_slot_private_alloc(bss_hotplug_slot, 642 if (sn_hp_slot_private_alloc(bss_hotplug_slot,
648 pci_bus, device)) { 643 pci_bus, device, name)) {
649 rc = -ENOMEM; 644 rc = -ENOMEM;
650 goto alloc_err; 645 goto alloc_err;
651 } 646 }
652
653 bss_hotplug_slot->ops = &sn_hotplug_slot_ops; 647 bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
654 bss_hotplug_slot->release = &sn_release_slot; 648 bss_hotplug_slot->release = &sn_release_slot;
655 649
656 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device); 650 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
657 if (rc) 651 if (rc)
658 goto register_err; 652 goto register_err;
659 653
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8a026f750deb..6aba0b6cf2e0 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -59,6 +59,20 @@ extern struct workqueue_struct *shpchp_wq;
59#define warn(format, arg...) \ 59#define warn(format, arg...) \
60 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 60 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
61 61
62#define ctrl_dbg(ctrl, format, arg...) \
63 do { \
64 if (shpchp_debug) \
65 dev_printk(, &ctrl->pci_dev->dev, \
66 format, ## arg); \
67 } while (0)
68#define ctrl_err(ctrl, format, arg...) \
69 dev_err(&ctrl->pci_dev->dev, format, ## arg)
70#define ctrl_info(ctrl, format, arg...) \
71 dev_info(&ctrl->pci_dev->dev, format, ## arg)
72#define ctrl_warn(ctrl, format, arg...) \
73 dev_warn(&ctrl->pci_dev->dev, format, ## arg)
74
75
62#define SLOT_NAME_SIZE 10 76#define SLOT_NAME_SIZE 10
63struct slot { 77struct slot {
64 u8 bus; 78 u8 bus;
@@ -69,15 +83,13 @@ struct slot {
69 u8 state; 83 u8 state;
70 u8 presence_save; 84 u8 presence_save;
71 u8 pwr_save; 85 u8 pwr_save;
72 struct timer_list task_event;
73 u8 hp_slot;
74 struct controller *ctrl; 86 struct controller *ctrl;
75 struct hpc_ops *hpc_ops; 87 struct hpc_ops *hpc_ops;
76 struct hotplug_slot *hotplug_slot; 88 struct hotplug_slot *hotplug_slot;
77 struct list_head slot_list; 89 struct list_head slot_list;
78 char name[SLOT_NAME_SIZE];
79 struct delayed_work work; /* work for button event */ 90 struct delayed_work work; /* work for button event */
80 struct mutex lock; 91 struct mutex lock;
92 u8 hp_slot;
81}; 93};
82 94
83struct event_info { 95struct event_info {
@@ -169,6 +181,11 @@ extern void cleanup_slots(struct controller *ctrl);
169extern void shpchp_queue_pushbutton_work(struct work_struct *work); 181extern void shpchp_queue_pushbutton_work(struct work_struct *work);
170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); 182extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
171 183
184static inline const char *slot_name(struct slot *slot)
185{
186 return hotplug_slot_name(slot->hotplug_slot);
187}
188
172#ifdef CONFIG_ACPI 189#ifdef CONFIG_ACPI
173#include <linux/pci-acpi.h> 190#include <linux/pci-acpi.h>
174static inline int get_hp_params_from_firmware(struct pci_dev *dev, 191static inline int get_hp_params_from_firmware(struct pci_dev *dev,
@@ -236,7 +253,7 @@ static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device)
236 return slot; 253 return slot;
237 } 254 }
238 255
239 err("%s: slot (device=0x%x) not found\n", __func__, device); 256 ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
240 return NULL; 257 return NULL;
241} 258}
242 259
@@ -270,7 +287,9 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
270 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, &pcix_bridge_errors_reg); 287 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, &pcix_bridge_errors_reg);
271 perr_set = pcix_bridge_errors_reg & PERR_OBSERVED_MASK; 288 perr_set = pcix_bridge_errors_reg & PERR_OBSERVED_MASK;
272 if (perr_set) { 289 if (perr_set) {
273 dbg ("%s W1C: Bridge_Errors[ PERR_OBSERVED = %08X]\n",__func__ , perr_set); 290 ctrl_dbg(p_slot->ctrl,
291 "Bridge_Errors[ PERR_OBSERVED = %08X] (W1C)\n",
292 perr_set);
274 293
275 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, perr_set); 294 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, perr_set);
276 } 295 }
@@ -279,7 +298,7 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
279 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, &pcix_mem_base_reg); 298 pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, &pcix_mem_base_reg);
280 rse_set = pcix_mem_base_reg & RSE_MASK; 299 rse_set = pcix_mem_base_reg & RSE_MASK;
281 if (rse_set) { 300 if (rse_set) {
282 dbg ("%s W1C: Memory_Base_Limit[ RSE ]\n",__func__ ); 301 ctrl_dbg(p_slot->ctrl, "Memory_Base_Limit[ RSE ] (W1C)\n");
283 302
284 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set); 303 pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set);
285 } 304 }
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index a8cbd039b85b..fe8d149c2293 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,7 +39,6 @@
39int shpchp_debug; 39int shpchp_debug;
40int shpchp_poll_mode; 40int shpchp_poll_mode;
41int shpchp_poll_time; 41int shpchp_poll_time;
42static int shpchp_slot_with_bus;
43struct workqueue_struct *shpchp_wq; 42struct workqueue_struct *shpchp_wq;
44 43
45#define DRIVER_VERSION "0.4" 44#define DRIVER_VERSION "0.4"
@@ -53,11 +52,9 @@ MODULE_LICENSE("GPL");
53module_param(shpchp_debug, bool, 0644); 52module_param(shpchp_debug, bool, 0644);
54module_param(shpchp_poll_mode, bool, 0644); 53module_param(shpchp_poll_mode, bool, 0644);
55module_param(shpchp_poll_time, int, 0644); 54module_param(shpchp_poll_time, int, 0644);
56module_param(shpchp_slot_with_bus, bool, 0644);
57MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); 55MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
58MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); 56MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
59MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); 57MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
60MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name");
61 58
62#define SHPC_MODULE_NAME "shpchp" 59#define SHPC_MODULE_NAME "shpchp"
63 60
@@ -92,28 +89,20 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
92{ 89{
93 struct slot *slot = hotplug_slot->private; 90 struct slot *slot = hotplug_slot->private;
94 91
95 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 92 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
93 __func__, slot_name(slot));
96 94
97 kfree(slot->hotplug_slot->info); 95 kfree(slot->hotplug_slot->info);
98 kfree(slot->hotplug_slot); 96 kfree(slot->hotplug_slot);
99 kfree(slot); 97 kfree(slot);
100} 98}
101 99
102static void make_slot_name(struct slot *slot)
103{
104 if (shpchp_slot_with_bus)
105 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
106 slot->bus, slot->number);
107 else
108 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
109 slot->number);
110}
111
112static int init_slots(struct controller *ctrl) 100static int init_slots(struct controller *ctrl)
113{ 101{
114 struct slot *slot; 102 struct slot *slot;
115 struct hotplug_slot *hotplug_slot; 103 struct hotplug_slot *hotplug_slot;
116 struct hotplug_slot_info *info; 104 struct hotplug_slot_info *info;
105 char name[SLOT_NAME_SIZE];
117 int retval = -ENOMEM; 106 int retval = -ENOMEM;
118 int i; 107 int i;
119 108
@@ -132,8 +121,6 @@ static int init_slots(struct controller *ctrl)
132 goto error_hpslot; 121 goto error_hpslot;
133 hotplug_slot->info = info; 122 hotplug_slot->info = info;
134 123
135 hotplug_slot->name = slot->name;
136
137 slot->hp_slot = i; 124 slot->hp_slot = i;
138 slot->ctrl = ctrl; 125 slot->ctrl = ctrl;
139 slot->bus = ctrl->pci_dev->subordinate->number; 126 slot->bus = ctrl->pci_dev->subordinate->number;
@@ -146,28 +133,27 @@ static int init_slots(struct controller *ctrl)
146 /* register this slot with the hotplug pci core */ 133 /* register this slot with the hotplug pci core */
147 hotplug_slot->private = slot; 134 hotplug_slot->private = slot;
148 hotplug_slot->release = &release_slot; 135 hotplug_slot->release = &release_slot;
149 make_slot_name(slot); 136 snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
150 hotplug_slot->ops = &shpchp_hotplug_slot_ops; 137 hotplug_slot->ops = &shpchp_hotplug_slot_ops;
151 138
152 get_power_status(hotplug_slot, &info->power_status); 139 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
153 get_attention_status(hotplug_slot, &info->attention_status); 140 "hp_slot=%x sun=%x slot_device_offset=%x\n",
154 get_latch_status(hotplug_slot, &info->latch_status); 141 pci_domain_nr(ctrl->pci_dev->subordinate),
155 get_adapter_status(hotplug_slot, &info->adapter_status); 142 slot->bus, slot->device, slot->hp_slot, slot->number,
156 143 ctrl->slot_device_offset);
157 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
158 "slot_device_offset=%x\n", slot->bus, slot->device,
159 slot->hp_slot, slot->number, ctrl->slot_device_offset);
160 retval = pci_hp_register(slot->hotplug_slot, 144 retval = pci_hp_register(slot->hotplug_slot,
161 ctrl->pci_dev->subordinate, slot->device); 145 ctrl->pci_dev->subordinate, slot->device, name);
162 if (retval) { 146 if (retval) {
163 err("pci_hp_register failed with error %d\n", retval); 147 ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
164 if (retval == -EEXIST) 148 retval);
165 err("Failed to register slot because of name "
166 "collision. Try \'shpchp_slot_with_bus\' "
167 "module option.\n");
168 goto error_info; 149 goto error_info;
169 } 150 }
170 151
152 get_power_status(hotplug_slot, &info->power_status);
153 get_attention_status(hotplug_slot, &info->attention_status);
154 get_latch_status(hotplug_slot, &info->latch_status);
155 get_adapter_status(hotplug_slot, &info->adapter_status);
156
171 list_add(&slot->slot_list, &ctrl->slot_list); 157 list_add(&slot->slot_list, &ctrl->slot_list);
172 } 158 }
173 159
@@ -205,7 +191,8 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
205{ 191{
206 struct slot *slot = get_slot(hotplug_slot); 192 struct slot *slot = get_slot(hotplug_slot);
207 193
208 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 194 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
195 __func__, slot_name(slot));
209 196
210 hotplug_slot->info->attention_status = status; 197 hotplug_slot->info->attention_status = status;
211 slot->hpc_ops->set_attention_status(slot, status); 198 slot->hpc_ops->set_attention_status(slot, status);
@@ -217,7 +204,8 @@ static int enable_slot (struct hotplug_slot *hotplug_slot)
217{ 204{
218 struct slot *slot = get_slot(hotplug_slot); 205 struct slot *slot = get_slot(hotplug_slot);
219 206
220 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 207 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
208 __func__, slot_name(slot));
221 209
222 return shpchp_sysfs_enable_slot(slot); 210 return shpchp_sysfs_enable_slot(slot);
223} 211}
@@ -226,7 +214,8 @@ static int disable_slot (struct hotplug_slot *hotplug_slot)
226{ 214{
227 struct slot *slot = get_slot(hotplug_slot); 215 struct slot *slot = get_slot(hotplug_slot);
228 216
229 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 217 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
218 __func__, slot_name(slot));
230 219
231 return shpchp_sysfs_disable_slot(slot); 220 return shpchp_sysfs_disable_slot(slot);
232} 221}
@@ -236,7 +225,8 @@ static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value)
236 struct slot *slot = get_slot(hotplug_slot); 225 struct slot *slot = get_slot(hotplug_slot);
237 int retval; 226 int retval;
238 227
239 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 228 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
229 __func__, slot_name(slot));
240 230
241 retval = slot->hpc_ops->get_power_status(slot, value); 231 retval = slot->hpc_ops->get_power_status(slot, value);
242 if (retval < 0) 232 if (retval < 0)
@@ -250,7 +240,8 @@ static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value)
250 struct slot *slot = get_slot(hotplug_slot); 240 struct slot *slot = get_slot(hotplug_slot);
251 int retval; 241 int retval;
252 242
253 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 243 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
244 __func__, slot_name(slot));
254 245
255 retval = slot->hpc_ops->get_attention_status(slot, value); 246 retval = slot->hpc_ops->get_attention_status(slot, value);
256 if (retval < 0) 247 if (retval < 0)
@@ -264,7 +255,8 @@ static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value)
264 struct slot *slot = get_slot(hotplug_slot); 255 struct slot *slot = get_slot(hotplug_slot);
265 int retval; 256 int retval;
266 257
267 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 258 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
259 __func__, slot_name(slot));
268 260
269 retval = slot->hpc_ops->get_latch_status(slot, value); 261 retval = slot->hpc_ops->get_latch_status(slot, value);
270 if (retval < 0) 262 if (retval < 0)
@@ -278,7 +270,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
278 struct slot *slot = get_slot(hotplug_slot); 270 struct slot *slot = get_slot(hotplug_slot);
279 int retval; 271 int retval;
280 272
281 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 273 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
274 __func__, slot_name(slot));
282 275
283 retval = slot->hpc_ops->get_adapter_status(slot, value); 276 retval = slot->hpc_ops->get_adapter_status(slot, value);
284 if (retval < 0) 277 if (retval < 0)
@@ -293,7 +286,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
293 struct slot *slot = get_slot(hotplug_slot); 286 struct slot *slot = get_slot(hotplug_slot);
294 int retval; 287 int retval;
295 288
296 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 289 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
290 __func__, slot_name(slot));
297 291
298 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 292 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
299 if (retval < 0) 293 if (retval < 0)
@@ -307,7 +301,8 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
307 struct slot *slot = get_slot(hotplug_slot); 301 struct slot *slot = get_slot(hotplug_slot);
308 int retval; 302 int retval;
309 303
310 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 304 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
305 __func__, slot_name(slot));
311 306
312 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 307 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
313 if (retval < 0) 308 if (retval < 0)
@@ -338,15 +333,14 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
338 333
339 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 334 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
340 if (!ctrl) { 335 if (!ctrl) {
341 err("%s : out of memory\n", __func__); 336 dev_err(&pdev->dev, "%s: Out of memory\n", __func__);
342 goto err_out_none; 337 goto err_out_none;
343 } 338 }
344 INIT_LIST_HEAD(&ctrl->slot_list); 339 INIT_LIST_HEAD(&ctrl->slot_list);
345 340
346 rc = shpc_init(ctrl, pdev); 341 rc = shpc_init(ctrl, pdev);
347 if (rc) { 342 if (rc) {
348 dbg("%s: controller initialization failed\n", 343 ctrl_dbg(ctrl, "Controller initialization failed\n");
349 SHPC_MODULE_NAME);
350 goto err_out_free_ctrl; 344 goto err_out_free_ctrl;
351 } 345 }
352 346
@@ -355,7 +349,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
355 /* Setup the slot information structures */ 349 /* Setup the slot information structures */
356 rc = init_slots(ctrl); 350 rc = init_slots(ctrl);
357 if (rc) { 351 if (rc) {
358 err("%s: slot initialization failed\n", SHPC_MODULE_NAME); 352 ctrl_err(ctrl, "Slot initialization failed\n");
359 goto err_out_release_ctlr; 353 goto err_out_release_ctlr;
360 } 354 }
361 355
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index dfb53932dfbc..b8ab2796e66a 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -62,7 +62,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
62 u32 event_type; 62 u32 event_type;
63 63
64 /* Attention Button Change */ 64 /* Attention Button Change */
65 dbg("shpchp: Attention button interrupt received.\n"); 65 ctrl_dbg(ctrl, "Attention button interrupt received\n");
66 66
67 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 67 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
68 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 68 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
@@ -70,7 +70,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
70 /* 70 /*
71 * Button pressed - See if need to TAKE ACTION!!! 71 * Button pressed - See if need to TAKE ACTION!!!
72 */ 72 */
73 info("Button pressed on Slot(%s)\n", p_slot->name); 73 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
74 event_type = INT_BUTTON_PRESS; 74 event_type = INT_BUTTON_PRESS;
75 75
76 queue_interrupt_event(p_slot, event_type); 76 queue_interrupt_event(p_slot, event_type);
@@ -86,29 +86,29 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
86 u32 event_type; 86 u32 event_type;
87 87
88 /* Switch Change */ 88 /* Switch Change */
89 dbg("shpchp: Switch interrupt received.\n"); 89 ctrl_dbg(ctrl, "Switch interrupt received\n");
90 90
91 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 91 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
92 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 92 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
93 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 93 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
94 dbg("%s: Card present %x Power status %x\n", __func__, 94 ctrl_dbg(ctrl, "Card present %x Power status %x\n",
95 p_slot->presence_save, p_slot->pwr_save); 95 p_slot->presence_save, p_slot->pwr_save);
96 96
97 if (getstatus) { 97 if (getstatus) {
98 /* 98 /*
99 * Switch opened 99 * Switch opened
100 */ 100 */
101 info("Latch open on Slot(%s)\n", p_slot->name); 101 ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
102 event_type = INT_SWITCH_OPEN; 102 event_type = INT_SWITCH_OPEN;
103 if (p_slot->pwr_save && p_slot->presence_save) { 103 if (p_slot->pwr_save && p_slot->presence_save) {
104 event_type = INT_POWER_FAULT; 104 event_type = INT_POWER_FAULT;
105 err("Surprise Removal of card\n"); 105 ctrl_err(ctrl, "Surprise Removal of card\n");
106 } 106 }
107 } else { 107 } else {
108 /* 108 /*
109 * Switch closed 109 * Switch closed
110 */ 110 */
111 info("Latch close on Slot(%s)\n", p_slot->name); 111 ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
112 event_type = INT_SWITCH_CLOSE; 112 event_type = INT_SWITCH_CLOSE;
113 } 113 }
114 114
@@ -123,7 +123,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
123 u32 event_type; 123 u32 event_type;
124 124
125 /* Presence Change */ 125 /* Presence Change */
126 dbg("shpchp: Presence/Notify input change.\n"); 126 ctrl_dbg(ctrl, "Presence/Notify input change\n");
127 127
128 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 128 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
129 129
@@ -135,13 +135,15 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
135 /* 135 /*
136 * Card Present 136 * Card Present
137 */ 137 */
138 info("Card present on Slot(%s)\n", p_slot->name); 138 ctrl_info(ctrl, "Card present on Slot(%s)\n",
139 slot_name(p_slot));
139 event_type = INT_PRESENCE_ON; 140 event_type = INT_PRESENCE_ON;
140 } else { 141 } else {
141 /* 142 /*
142 * Not Present 143 * Not Present
143 */ 144 */
144 info("Card not present on Slot(%s)\n", p_slot->name); 145 ctrl_info(ctrl, "Card not present on Slot(%s)\n",
146 slot_name(p_slot));
145 event_type = INT_PRESENCE_OFF; 147 event_type = INT_PRESENCE_OFF;
146 } 148 }
147 149
@@ -156,7 +158,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
156 u32 event_type; 158 u32 event_type;
157 159
158 /* Power fault */ 160 /* Power fault */
159 dbg("shpchp: Power fault interrupt received.\n"); 161 ctrl_dbg(ctrl, "Power fault interrupt received\n");
160 162
161 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 163 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
162 164
@@ -164,18 +166,19 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
164 /* 166 /*
165 * Power fault Cleared 167 * Power fault Cleared
166 */ 168 */
167 info("Power fault cleared on Slot(%s)\n", p_slot->name); 169 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
170 slot_name(p_slot));
168 p_slot->status = 0x00; 171 p_slot->status = 0x00;
169 event_type = INT_POWER_FAULT_CLEAR; 172 event_type = INT_POWER_FAULT_CLEAR;
170 } else { 173 } else {
171 /* 174 /*
172 * Power fault 175 * Power fault
173 */ 176 */
174 info("Power fault on Slot(%s)\n", p_slot->name); 177 ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
175 event_type = INT_POWER_FAULT; 178 event_type = INT_POWER_FAULT;
176 /* set power fault status for this board */ 179 /* set power fault status for this board */
177 p_slot->status = 0xFF; 180 p_slot->status = 0xFF;
178 info("power fault bit %x set\n", hp_slot); 181 ctrl_info(ctrl, "Power fault bit %x set\n", hp_slot);
179 } 182 }
180 183
181 queue_interrupt_event(p_slot, event_type); 184 queue_interrupt_event(p_slot, event_type);
@@ -191,10 +194,10 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
191{ 194{
192 int rc = 0; 195 int rc = 0;
193 196
194 dbg("%s: change to speed %d\n", __func__, speed); 197 ctrl_dbg(ctrl, "Change speed to %d\n", speed);
195 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { 198 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) {
196 err("%s: Issue of set bus speed mode command failed\n", 199 ctrl_err(ctrl, "%s: Issue of set bus speed mode command "
197 __func__); 200 "failed\n", __func__);
198 return WRONG_BUS_FREQUENCY; 201 return WRONG_BUS_FREQUENCY;
199 } 202 }
200 return rc; 203 return rc;
@@ -212,8 +215,8 @@ static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
212 */ 215 */
213 if (flag) { 216 if (flag) {
214 if (asp < bsp) { 217 if (asp < bsp) {
215 err("%s: speed of bus %x and adapter %x mismatch\n", 218 ctrl_err(ctrl, "Speed of bus %x and adapter %x "
216 __func__, bsp, asp); 219 "mismatch\n", bsp, asp);
217 rc = WRONG_BUS_FREQUENCY; 220 rc = WRONG_BUS_FREQUENCY;
218 } 221 }
219 return rc; 222 return rc;
@@ -243,17 +246,18 @@ static int board_added(struct slot *p_slot)
243 int rc = 0; 246 int rc = 0;
244 enum pci_bus_speed asp, bsp, msp; 247 enum pci_bus_speed asp, bsp, msp;
245 struct controller *ctrl = p_slot->ctrl; 248 struct controller *ctrl = p_slot->ctrl;
249 struct pci_bus *parent = ctrl->pci_dev->subordinate;
246 250
247 hp_slot = p_slot->device - ctrl->slot_device_offset; 251 hp_slot = p_slot->device - ctrl->slot_device_offset;
248 252
249 dbg("%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n", 253 ctrl_dbg(ctrl,
250 __func__, p_slot->device, 254 "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n",
251 ctrl->slot_device_offset, hp_slot); 255 __func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
252 256
253 /* Power on slot without connecting to bus */ 257 /* Power on slot without connecting to bus */
254 rc = p_slot->hpc_ops->power_on_slot(p_slot); 258 rc = p_slot->hpc_ops->power_on_slot(p_slot);
255 if (rc) { 259 if (rc) {
256 err("%s: Failed to power on slot\n", __func__); 260 ctrl_err(ctrl, "Failed to power on slot\n");
257 return -1; 261 return -1;
258 } 262 }
259 263
@@ -262,33 +266,34 @@ static int board_added(struct slot *p_slot)
262 return WRONG_BUS_FREQUENCY; 266 return WRONG_BUS_FREQUENCY;
263 267
264 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { 268 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
265 err("%s: Issue of set bus speed mode command failed\n", __func__); 269 ctrl_err(ctrl, "%s: Issue of set bus speed mode command"
270 " failed\n", __func__);
266 return WRONG_BUS_FREQUENCY; 271 return WRONG_BUS_FREQUENCY;
267 } 272 }
268 273
269 /* turn on board, blink green LED, turn off Amber LED */ 274 /* turn on board, blink green LED, turn off Amber LED */
270 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 275 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
271 err("%s: Issue of Slot Enable command failed\n", __func__); 276 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
272 return rc; 277 return rc;
273 } 278 }
274 } 279 }
275 280
276 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); 281 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
277 if (rc) { 282 if (rc) {
278 err("%s: Can't get adapter speed or bus mode mismatch\n", 283 ctrl_err(ctrl, "Can't get adapter speed or "
279 __func__); 284 "bus mode mismatch\n");
280 return WRONG_BUS_FREQUENCY; 285 return WRONG_BUS_FREQUENCY;
281 } 286 }
282 287
283 rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); 288 rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp);
284 if (rc) { 289 if (rc) {
285 err("%s: Can't get bus operation speed\n", __func__); 290 ctrl_err(ctrl, "Can't get bus operation speed\n");
286 return WRONG_BUS_FREQUENCY; 291 return WRONG_BUS_FREQUENCY;
287 } 292 }
288 293
289 rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); 294 rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp);
290 if (rc) { 295 if (rc) {
291 err("%s: Can't get max bus operation speed\n", __func__); 296 ctrl_err(ctrl, "Can't get max bus operation speed\n");
292 msp = bsp; 297 msp = bsp;
293 } 298 }
294 299
@@ -296,9 +301,9 @@ static int board_added(struct slot *p_slot)
296 if (!list_empty(&ctrl->pci_dev->subordinate->devices)) 301 if (!list_empty(&ctrl->pci_dev->subordinate->devices))
297 slots_not_empty = 1; 302 slots_not_empty = 1;
298 303
299 dbg("%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, " 304 ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d,"
300 "max_bus_speed %d\n", __func__, slots_not_empty, asp, 305 " max_bus_speed %d\n", __func__, slots_not_empty, asp,
301 bsp, msp); 306 bsp, msp);
302 307
303 rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp); 308 rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp);
304 if (rc) 309 if (rc)
@@ -306,26 +311,26 @@ static int board_added(struct slot *p_slot)
306 311
307 /* turn on board, blink green LED, turn off Amber LED */ 312 /* turn on board, blink green LED, turn off Amber LED */
308 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 313 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
309 err("%s: Issue of Slot Enable command failed\n", __func__); 314 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
310 return rc; 315 return rc;
311 } 316 }
312 317
313 /* Wait for ~1 second */ 318 /* Wait for ~1 second */
314 msleep(1000); 319 msleep(1000);
315 320
316 dbg("%s: slot status = %x\n", __func__, p_slot->status); 321 ctrl_dbg(ctrl, "%s: slot status = %x\n", __func__, p_slot->status);
317 /* Check for a power fault */ 322 /* Check for a power fault */
318 if (p_slot->status == 0xFF) { 323 if (p_slot->status == 0xFF) {
319 /* power fault occurred, but it was benign */ 324 /* power fault occurred, but it was benign */
320 dbg("%s: power fault\n", __func__); 325 ctrl_dbg(ctrl, "%s: Power fault\n", __func__);
321 rc = POWER_FAILURE; 326 rc = POWER_FAILURE;
322 p_slot->status = 0; 327 p_slot->status = 0;
323 goto err_exit; 328 goto err_exit;
324 } 329 }
325 330
326 if (shpchp_configure_device(p_slot)) { 331 if (shpchp_configure_device(p_slot)) {
327 err("Cannot add device at 0x%x:0x%x\n", p_slot->bus, 332 ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
328 p_slot->device); 333 pci_domain_nr(parent), p_slot->bus, p_slot->device);
329 goto err_exit; 334 goto err_exit;
330 } 335 }
331 336
@@ -341,7 +346,8 @@ err_exit:
341 /* turn off slot, turn on Amber LED, turn off Green LED */ 346 /* turn off slot, turn on Amber LED, turn off Green LED */
342 rc = p_slot->hpc_ops->slot_disable(p_slot); 347 rc = p_slot->hpc_ops->slot_disable(p_slot);
343 if (rc) { 348 if (rc) {
344 err("%s: Issue of Slot Disable command failed\n", __func__); 349 ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
350 __func__);
345 return rc; 351 return rc;
346 } 352 }
347 353
@@ -365,7 +371,7 @@ static int remove_board(struct slot *p_slot)
365 hp_slot = p_slot->device - ctrl->slot_device_offset; 371 hp_slot = p_slot->device - ctrl->slot_device_offset;
366 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 372 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
367 373
368 dbg("In %s, hp_slot = %d\n", __func__, hp_slot); 374 ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, hp_slot);
369 375
370 /* Change status to shutdown */ 376 /* Change status to shutdown */
371 if (p_slot->is_a_board) 377 if (p_slot->is_a_board)
@@ -374,13 +380,14 @@ static int remove_board(struct slot *p_slot)
374 /* turn off slot, turn on Amber LED, turn off Green LED */ 380 /* turn off slot, turn on Amber LED, turn off Green LED */
375 rc = p_slot->hpc_ops->slot_disable(p_slot); 381 rc = p_slot->hpc_ops->slot_disable(p_slot);
376 if (rc) { 382 if (rc) {
377 err("%s: Issue of Slot Disable command failed\n", __func__); 383 ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
384 __func__);
378 return rc; 385 return rc;
379 } 386 }
380 387
381 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); 388 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
382 if (rc) { 389 if (rc) {
383 err("%s: Issue of Set Attention command failed\n", __func__); 390 ctrl_err(ctrl, "Issue of Set Attention command failed\n");
384 return rc; 391 return rc;
385 } 392 }
386 393
@@ -439,7 +446,8 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
439 446
440 info = kmalloc(sizeof(*info), GFP_KERNEL); 447 info = kmalloc(sizeof(*info), GFP_KERNEL);
441 if (!info) { 448 if (!info) {
442 err("%s: Cannot allocate memory\n", __func__); 449 ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
450 __func__);
443 return; 451 return;
444 } 452 }
445 info->p_slot = p_slot; 453 info->p_slot = p_slot;
@@ -486,18 +494,19 @@ static int update_slot_info (struct slot *slot)
486static void handle_button_press_event(struct slot *p_slot) 494static void handle_button_press_event(struct slot *p_slot)
487{ 495{
488 u8 getstatus; 496 u8 getstatus;
497 struct controller *ctrl = p_slot->ctrl;
489 498
490 switch (p_slot->state) { 499 switch (p_slot->state) {
491 case STATIC_STATE: 500 case STATIC_STATE:
492 p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 501 p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
493 if (getstatus) { 502 if (getstatus) {
494 p_slot->state = BLINKINGOFF_STATE; 503 p_slot->state = BLINKINGOFF_STATE;
495 info("PCI slot #%s - powering off due to button " 504 ctrl_info(ctrl, "PCI slot #%s - powering off due to "
496 "press.\n", p_slot->name); 505 "button press.\n", slot_name(p_slot));
497 } else { 506 } else {
498 p_slot->state = BLINKINGON_STATE; 507 p_slot->state = BLINKINGON_STATE;
499 info("PCI slot #%s - powering on due to button " 508 ctrl_info(ctrl, "PCI slot #%s - powering on due to "
500 "press.\n", p_slot->name); 509 "button press.\n", slot_name(p_slot));
501 } 510 }
502 /* blink green LED and turn off amber */ 511 /* blink green LED and turn off amber */
503 p_slot->hpc_ops->green_led_blink(p_slot); 512 p_slot->hpc_ops->green_led_blink(p_slot);
@@ -512,16 +521,16 @@ static void handle_button_press_event(struct slot *p_slot)
512 * press the attention again before the 5 sec. limit 521 * press the attention again before the 5 sec. limit
513 * expires to cancel hot-add or hot-remove 522 * expires to cancel hot-add or hot-remove
514 */ 523 */
515 info("Button cancel on Slot(%s)\n", p_slot->name); 524 ctrl_info(ctrl, "Button cancel on Slot(%s)\n",
516 dbg("%s: button cancel\n", __func__); 525 slot_name(p_slot));
517 cancel_delayed_work(&p_slot->work); 526 cancel_delayed_work(&p_slot->work);
518 if (p_slot->state == BLINKINGOFF_STATE) 527 if (p_slot->state == BLINKINGOFF_STATE)
519 p_slot->hpc_ops->green_led_on(p_slot); 528 p_slot->hpc_ops->green_led_on(p_slot);
520 else 529 else
521 p_slot->hpc_ops->green_led_off(p_slot); 530 p_slot->hpc_ops->green_led_off(p_slot);
522 p_slot->hpc_ops->set_attention_status(p_slot, 0); 531 p_slot->hpc_ops->set_attention_status(p_slot, 0);
523 info("PCI slot #%s - action canceled due to button press\n", 532 ctrl_info(ctrl, "PCI slot #%s - action canceled due to "
524 p_slot->name); 533 "button press\n", slot_name(p_slot));
525 p_slot->state = STATIC_STATE; 534 p_slot->state = STATIC_STATE;
526 break; 535 break;
527 case POWEROFF_STATE: 536 case POWEROFF_STATE:
@@ -531,11 +540,12 @@ static void handle_button_press_event(struct slot *p_slot)
531 * this means that the previous attention button action 540 * this means that the previous attention button action
532 * to hot-add or hot-remove is undergoing 541 * to hot-add or hot-remove is undergoing
533 */ 542 */
534 info("Button ignore on Slot(%s)\n", p_slot->name); 543 ctrl_info(ctrl, "Button ignore on Slot(%s)\n",
544 slot_name(p_slot));
535 update_slot_info(p_slot); 545 update_slot_info(p_slot);
536 break; 546 break;
537 default: 547 default:
538 warn("Not a valid state\n"); 548 ctrl_warn(ctrl, "Not a valid state\n");
539 break; 549 break;
540 } 550 }
541} 551}
@@ -551,7 +561,7 @@ static void interrupt_event_handler(struct work_struct *work)
551 handle_button_press_event(p_slot); 561 handle_button_press_event(p_slot);
552 break; 562 break;
553 case INT_POWER_FAULT: 563 case INT_POWER_FAULT:
554 dbg("%s: power fault\n", __func__); 564 ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
555 p_slot->hpc_ops->set_attention_status(p_slot, 1); 565 p_slot->hpc_ops->set_attention_status(p_slot, 1);
556 p_slot->hpc_ops->green_led_off(p_slot); 566 p_slot->hpc_ops->green_led_off(p_slot);
557 break; 567 break;
@@ -569,22 +579,24 @@ static int shpchp_enable_slot (struct slot *p_slot)
569{ 579{
570 u8 getstatus = 0; 580 u8 getstatus = 0;
571 int rc, retval = -ENODEV; 581 int rc, retval = -ENODEV;
582 struct controller *ctrl = p_slot->ctrl;
572 583
573 /* Check to see if (latch closed, card present, power off) */ 584 /* Check to see if (latch closed, card present, power off) */
574 mutex_lock(&p_slot->ctrl->crit_sect); 585 mutex_lock(&p_slot->ctrl->crit_sect);
575 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 586 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
576 if (rc || !getstatus) { 587 if (rc || !getstatus) {
577 info("No adapter on slot(%s)\n", p_slot->name); 588 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
578 goto out; 589 goto out;
579 } 590 }
580 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 591 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
581 if (rc || getstatus) { 592 if (rc || getstatus) {
582 info("Latch open on slot(%s)\n", p_slot->name); 593 ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
583 goto out; 594 goto out;
584 } 595 }
585 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 596 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
586 if (rc || getstatus) { 597 if (rc || getstatus) {
587 info("Already enabled on slot(%s)\n", p_slot->name); 598 ctrl_info(ctrl, "Already enabled on slot(%s)\n",
599 slot_name(p_slot));
588 goto out; 600 goto out;
589 } 601 }
590 602
@@ -593,7 +605,7 @@ static int shpchp_enable_slot (struct slot *p_slot)
593 /* We have to save the presence info for these slots */ 605 /* We have to save the presence info for these slots */
594 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 606 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
595 p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save)); 607 p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
596 dbg("%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); 608 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
597 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 609 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
598 610
599 if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || 611 if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
@@ -624,6 +636,7 @@ static int shpchp_disable_slot (struct slot *p_slot)
624{ 636{
625 u8 getstatus = 0; 637 u8 getstatus = 0;
626 int rc, retval = -ENODEV; 638 int rc, retval = -ENODEV;
639 struct controller *ctrl = p_slot->ctrl;
627 640
628 if (!p_slot->ctrl) 641 if (!p_slot->ctrl)
629 return -ENODEV; 642 return -ENODEV;
@@ -633,17 +646,18 @@ static int shpchp_disable_slot (struct slot *p_slot)
633 646
634 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 647 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
635 if (rc || !getstatus) { 648 if (rc || !getstatus) {
636 info("No adapter on slot(%s)\n", p_slot->name); 649 ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
637 goto out; 650 goto out;
638 } 651 }
639 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 652 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
640 if (rc || getstatus) { 653 if (rc || getstatus) {
641 info("Latch open on slot(%s)\n", p_slot->name); 654 ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
642 goto out; 655 goto out;
643 } 656 }
644 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 657 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
645 if (rc || !getstatus) { 658 if (rc || !getstatus) {
646 info("Already disabled slot(%s)\n", p_slot->name); 659 ctrl_info(ctrl, "Already disabled on slot(%s)\n",
660 slot_name(p_slot));
647 goto out; 661 goto out;
648 } 662 }
649 663
@@ -657,6 +671,7 @@ static int shpchp_disable_slot (struct slot *p_slot)
657int shpchp_sysfs_enable_slot(struct slot *p_slot) 671int shpchp_sysfs_enable_slot(struct slot *p_slot)
658{ 672{
659 int retval = -ENODEV; 673 int retval = -ENODEV;
674 struct controller *ctrl = p_slot->ctrl;
660 675
661 mutex_lock(&p_slot->lock); 676 mutex_lock(&p_slot->lock);
662 switch (p_slot->state) { 677 switch (p_slot->state) {
@@ -670,15 +685,17 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
670 p_slot->state = STATIC_STATE; 685 p_slot->state = STATIC_STATE;
671 break; 686 break;
672 case POWERON_STATE: 687 case POWERON_STATE:
673 info("Slot %s is already in powering on state\n", 688 ctrl_info(ctrl, "Slot %s is already in powering on state\n",
674 p_slot->name); 689 slot_name(p_slot));
675 break; 690 break;
676 case BLINKINGOFF_STATE: 691 case BLINKINGOFF_STATE:
677 case POWEROFF_STATE: 692 case POWEROFF_STATE:
678 info("Already enabled on slot %s\n", p_slot->name); 693 ctrl_info(ctrl, "Already enabled on slot %s\n",
694 slot_name(p_slot));
679 break; 695 break;
680 default: 696 default:
681 err("Not a valid state on slot %s\n", p_slot->name); 697 ctrl_err(ctrl, "Not a valid state on slot %s\n",
698 slot_name(p_slot));
682 break; 699 break;
683 } 700 }
684 mutex_unlock(&p_slot->lock); 701 mutex_unlock(&p_slot->lock);
@@ -689,6 +706,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
689int shpchp_sysfs_disable_slot(struct slot *p_slot) 706int shpchp_sysfs_disable_slot(struct slot *p_slot)
690{ 707{
691 int retval = -ENODEV; 708 int retval = -ENODEV;
709 struct controller *ctrl = p_slot->ctrl;
692 710
693 mutex_lock(&p_slot->lock); 711 mutex_lock(&p_slot->lock);
694 switch (p_slot->state) { 712 switch (p_slot->state) {
@@ -702,15 +720,17 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
702 p_slot->state = STATIC_STATE; 720 p_slot->state = STATIC_STATE;
703 break; 721 break;
704 case POWEROFF_STATE: 722 case POWEROFF_STATE:
705 info("Slot %s is already in powering off state\n", 723 ctrl_info(ctrl, "Slot %s is already in powering off state\n",
706 p_slot->name); 724 slot_name(p_slot));
707 break; 725 break;
708 case BLINKINGON_STATE: 726 case BLINKINGON_STATE:
709 case POWERON_STATE: 727 case POWERON_STATE:
710 info("Already disabled on slot %s\n", p_slot->name); 728 ctrl_info(ctrl, "Already disabled on slot %s\n",
729 slot_name(p_slot));
711 break; 730 break;
712 default: 731 default:
713 err("Not a valid state on slot %s\n", p_slot->name); 732 ctrl_err(ctrl, "Not a valid state on slot %s\n",
733 slot_name(p_slot));
714 break; 734 break;
715 } 735 }
716 mutex_unlock(&p_slot->lock); 736 mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7a0bff364cd4..86dc39847769 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -300,10 +300,10 @@ static inline int shpc_wait_cmd(struct controller *ctrl)
300 !is_ctrl_busy(ctrl), timeout); 300 !is_ctrl_busy(ctrl), timeout);
301 if (!rc && is_ctrl_busy(ctrl)) { 301 if (!rc && is_ctrl_busy(ctrl)) {
302 retval = -EIO; 302 retval = -EIO;
303 err("Command not completed in 1000 msec\n"); 303 ctrl_err(ctrl, "Command not completed in 1000 msec\n");
304 } else if (rc < 0) { 304 } else if (rc < 0) {
305 retval = -EINTR; 305 retval = -EINTR;
306 info("Command was interrupted by a signal\n"); 306 ctrl_info(ctrl, "Command was interrupted by a signal\n");
307 } 307 }
308 308
309 return retval; 309 return retval;
@@ -320,15 +320,14 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
320 320
321 if (!shpc_poll_ctrl_busy(ctrl)) { 321 if (!shpc_poll_ctrl_busy(ctrl)) {
322 /* After 1 sec and and the controller is still busy */ 322 /* After 1 sec and and the controller is still busy */
323 err("%s : Controller is still busy after 1 sec.\n", 323 ctrl_err(ctrl, "Controller is still busy after 1 sec\n");
324 __func__);
325 retval = -EBUSY; 324 retval = -EBUSY;
326 goto out; 325 goto out;
327 } 326 }
328 327
329 ++t_slot; 328 ++t_slot;
330 temp_word = (t_slot << 8) | (cmd & 0xFF); 329 temp_word = (t_slot << 8) | (cmd & 0xFF);
331 dbg("%s: t_slot %x cmd %x\n", __func__, t_slot, cmd); 330 ctrl_dbg(ctrl, "%s: t_slot %x cmd %x\n", __func__, t_slot, cmd);
332 331
333 /* To make sure the Controller Busy bit is 0 before we send out the 332 /* To make sure the Controller Busy bit is 0 before we send out the
334 * command. 333 * command.
@@ -344,8 +343,9 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
344 343
345 cmd_status = hpc_check_cmd_status(slot->ctrl); 344 cmd_status = hpc_check_cmd_status(slot->ctrl);
346 if (cmd_status) { 345 if (cmd_status) {
347 err("%s: Failed to issued command 0x%x (error code = %d)\n", 346 ctrl_err(ctrl,
348 __func__, cmd, cmd_status); 347 "Failed to issued command 0x%x (error code = %d)\n",
348 cmd, cmd_status);
349 retval = -EIO; 349 retval = -EIO;
350 } 350 }
351 out: 351 out:
@@ -364,15 +364,15 @@ static int hpc_check_cmd_status(struct controller *ctrl)
364 break; 364 break;
365 case 1: 365 case 1:
366 retval = SWITCH_OPEN; 366 retval = SWITCH_OPEN;
367 err("%s: Switch opened!\n", __func__); 367 ctrl_err(ctrl, "Switch opened!\n");
368 break; 368 break;
369 case 2: 369 case 2:
370 retval = INVALID_CMD; 370 retval = INVALID_CMD;
371 err("%s: Invalid HPC command!\n", __func__); 371 ctrl_err(ctrl, "Invalid HPC command!\n");
372 break; 372 break;
373 case 4: 373 case 4:
374 retval = INVALID_SPEED_MODE; 374 retval = INVALID_SPEED_MODE;
375 err("%s: Invalid bus speed/mode!\n", __func__); 375 ctrl_err(ctrl, "Invalid bus speed/mode!\n");
376 break; 376 break;
377 default: 377 default:
378 retval = cmd_status; 378 retval = cmd_status;
@@ -483,8 +483,8 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
483 return -ENODEV; 483 return -ENODEV;
484 } 484 }
485 485
486 dbg("%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n", 486 ctrl_dbg(ctrl, "%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n",
487 __func__, slot_reg, pcix_cap, m66_cap); 487 __func__, slot_reg, pcix_cap, m66_cap);
488 488
489 switch (pcix_cap) { 489 switch (pcix_cap) {
490 case 0x0: 490 case 0x0:
@@ -509,7 +509,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
509 break; 509 break;
510 } 510 }
511 511
512 dbg("Adapter speed = %d\n", *value); 512 ctrl_dbg(ctrl, "Adapter speed = %d\n", *value);
513 return retval; 513 return retval;
514} 514}
515 515
@@ -526,7 +526,7 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
526 retval = -1; 526 retval = -1;
527 } 527 }
528 528
529 dbg("Mode 1 ECC cap = %d\n", *mode); 529 ctrl_dbg(ctrl, "Mode 1 ECC cap = %d\n", *mode);
530 return retval; 530 return retval;
531} 531}
532 532
@@ -629,7 +629,7 @@ static int hpc_power_on_slot(struct slot * slot)
629 629
630 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR); 630 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR);
631 if (retval) 631 if (retval)
632 err("%s: Write command failed!\n", __func__); 632 ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
633 633
634 return retval; 634 return retval;
635} 635}
@@ -642,7 +642,7 @@ static int hpc_slot_enable(struct slot * slot)
642 retval = shpc_write_cmd(slot, slot->hp_slot, 642 retval = shpc_write_cmd(slot, slot->hp_slot,
643 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF); 643 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF);
644 if (retval) 644 if (retval)
645 err("%s: Write command failed!\n", __func__); 645 ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
646 646
647 return retval; 647 return retval;
648} 648}
@@ -655,7 +655,7 @@ static int hpc_slot_disable(struct slot * slot)
655 retval = shpc_write_cmd(slot, slot->hp_slot, 655 retval = shpc_write_cmd(slot, slot->hp_slot,
656 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON); 656 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON);
657 if (retval) 657 if (retval)
658 err("%s: Write command failed!\n", __func__); 658 ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
659 659
660 return retval; 660 return retval;
661} 661}
@@ -719,7 +719,7 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
719 719
720 retval = shpc_write_cmd(slot, 0, cmd); 720 retval = shpc_write_cmd(slot, 0, cmd);
721 if (retval) 721 if (retval)
722 err("%s: Write command failed!\n", __func__); 722 ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
723 723
724 return retval; 724 return retval;
725} 725}
@@ -735,7 +735,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
735 if (!intr_loc) 735 if (!intr_loc)
736 return IRQ_NONE; 736 return IRQ_NONE;
737 737
738 dbg("%s: intr_loc = %x\n",__func__, intr_loc); 738 ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
739 739
740 if(!shpchp_poll_mode) { 740 if(!shpchp_poll_mode) {
741 /* 741 /*
@@ -748,7 +748,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
748 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 748 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
749 749
750 intr_loc2 = shpc_readl(ctrl, INTR_LOC); 750 intr_loc2 = shpc_readl(ctrl, INTR_LOC);
751 dbg("%s: intr_loc2 = %x\n",__func__, intr_loc2); 751 ctrl_dbg(ctrl, "%s: intr_loc2 = %x\n", __func__, intr_loc2);
752 } 752 }
753 753
754 if (intr_loc & CMD_INTR_PENDING) { 754 if (intr_loc & CMD_INTR_PENDING) {
@@ -773,8 +773,8 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
773 continue; 773 continue;
774 774
775 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); 775 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
776 dbg("%s: Slot %x with intr, slot register = %x\n", 776 ctrl_dbg(ctrl, "Slot %x with intr, slot register = %x\n",
777 __func__, hp_slot, slot_reg); 777 hp_slot, slot_reg);
778 778
779 if (slot_reg & MRL_CHANGE_DETECTED) 779 if (slot_reg & MRL_CHANGE_DETECTED)
780 shpchp_handle_switch_change(hp_slot, ctrl); 780 shpchp_handle_switch_change(hp_slot, ctrl);
@@ -843,7 +843,7 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
843 } 843 }
844 844
845 *value = bus_speed; 845 *value = bus_speed;
846 dbg("Max bus speed = %d\n", bus_speed); 846 ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
847 847
848 return retval; 848 return retval;
849} 849}
@@ -911,7 +911,7 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
911 break; 911 break;
912 } 912 }
913 913
914 dbg("Current bus speed = %d\n", bus_speed); 914 ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
915 return retval; 915 return retval;
916} 916}
917 917
@@ -949,6 +949,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
949 u8 i; 949 u8 i;
950 950
951 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 951 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
952 ctrl_dbg(ctrl, "Hotplug Controller:\n");
952 953
953 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 954 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
954 PCI_DEVICE_ID_AMD_GOLAM_7450)) { 955 PCI_DEVICE_ID_AMD_GOLAM_7450)) {
@@ -958,34 +959,33 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
958 } else { 959 } else {
959 ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC); 960 ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC);
960 if (!ctrl->cap_offset) { 961 if (!ctrl->cap_offset) {
961 err("%s : cap_offset == 0\n", __func__); 962 ctrl_err(ctrl, "Cannot find PCI capability\n");
962 goto abort; 963 goto abort;
963 } 964 }
964 dbg("%s: cap_offset = %x\n", __func__, ctrl->cap_offset); 965 ctrl_dbg(ctrl, " cap_offset = %x\n", ctrl->cap_offset);
965 966
966 rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset); 967 rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset);
967 if (rc) { 968 if (rc) {
968 err("%s: cannot read base_offset\n", __func__); 969 ctrl_err(ctrl, "Cannot read base_offset\n");
969 goto abort; 970 goto abort;
970 } 971 }
971 972
972 rc = shpc_indirect_read(ctrl, 3, &tempdword); 973 rc = shpc_indirect_read(ctrl, 3, &tempdword);
973 if (rc) { 974 if (rc) {
974 err("%s: cannot read slot config\n", __func__); 975 ctrl_err(ctrl, "Cannot read slot config\n");
975 goto abort; 976 goto abort;
976 } 977 }
977 num_slots = tempdword & SLOT_NUM; 978 num_slots = tempdword & SLOT_NUM;
978 dbg("%s: num_slots (indirect) %x\n", __func__, num_slots); 979 ctrl_dbg(ctrl, " num_slots (indirect) %x\n", num_slots);
979 980
980 for (i = 0; i < 9 + num_slots; i++) { 981 for (i = 0; i < 9 + num_slots; i++) {
981 rc = shpc_indirect_read(ctrl, i, &tempdword); 982 rc = shpc_indirect_read(ctrl, i, &tempdword);
982 if (rc) { 983 if (rc) {
983 err("%s: cannot read creg (index = %d)\n", 984 ctrl_err(ctrl,
984 __func__, i); 985 "Cannot read creg (index = %d)\n", i);
985 goto abort; 986 goto abort;
986 } 987 }
987 dbg("%s: offset %d: value %x\n", __func__,i, 988 ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword);
988 tempdword);
989 } 989 }
990 990
991 ctrl->mmio_base = 991 ctrl->mmio_base =
@@ -993,30 +993,31 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
993 ctrl->mmio_size = 0x24 + 0x4 * num_slots; 993 ctrl->mmio_size = 0x24 + 0x4 * num_slots;
994 } 994 }
995 995
996 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, 996 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
997 pdev->subsystem_device); 997 pdev->vendor, pdev->device, pdev->subsystem_vendor,
998 pdev->subsystem_device);
998 999
999 rc = pci_enable_device(pdev); 1000 rc = pci_enable_device(pdev);
1000 if (rc) { 1001 if (rc) {
1001 err("%s: pci_enable_device failed\n", __func__); 1002 ctrl_err(ctrl, "pci_enable_device failed\n");
1002 goto abort; 1003 goto abort;
1003 } 1004 }
1004 1005
1005 if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) { 1006 if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) {
1006 err("%s: cannot reserve MMIO region\n", __func__); 1007 ctrl_err(ctrl, "Cannot reserve MMIO region\n");
1007 rc = -1; 1008 rc = -1;
1008 goto abort; 1009 goto abort;
1009 } 1010 }
1010 1011
1011 ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size); 1012 ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size);
1012 if (!ctrl->creg) { 1013 if (!ctrl->creg) {
1013 err("%s: cannot remap MMIO region %lx @ %lx\n", __func__, 1014 ctrl_err(ctrl, "Cannot remap MMIO region %lx @ %lx\n",
1014 ctrl->mmio_size, ctrl->mmio_base); 1015 ctrl->mmio_size, ctrl->mmio_base);
1015 release_mem_region(ctrl->mmio_base, ctrl->mmio_size); 1016 release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
1016 rc = -1; 1017 rc = -1;
1017 goto abort; 1018 goto abort;
1018 } 1019 }
1019 dbg("%s: ctrl->creg %p\n", __func__, ctrl->creg); 1020 ctrl_dbg(ctrl, "ctrl->creg %p\n", ctrl->creg);
1020 1021
1021 mutex_init(&ctrl->crit_sect); 1022 mutex_init(&ctrl->crit_sect);
1022 mutex_init(&ctrl->cmd_lock); 1023 mutex_init(&ctrl->cmd_lock);
@@ -1035,21 +1036,21 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1035 1036
1036 /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */ 1037 /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */
1037 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); 1038 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
1038 dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); 1039 ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
1039 tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK | 1040 tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK |
1040 COMMAND_INTR_MASK | ARBITER_SERR_MASK); 1041 COMMAND_INTR_MASK | ARBITER_SERR_MASK);
1041 tempdword &= ~SERR_INTR_RSVDZ_MASK; 1042 tempdword &= ~SERR_INTR_RSVDZ_MASK;
1042 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); 1043 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
1043 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); 1044 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
1044 dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); 1045 ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
1045 1046
1046 /* Mask the MRL sensor SERR Mask of individual slot in 1047 /* Mask the MRL sensor SERR Mask of individual slot in
1047 * Slot SERR-INT Mask & clear all the existing event if any 1048 * Slot SERR-INT Mask & clear all the existing event if any
1048 */ 1049 */
1049 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 1050 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
1050 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); 1051 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
1051 dbg("%s: Default Logical Slot Register %d value %x\n", __func__, 1052 ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
1052 hp_slot, slot_reg); 1053 hp_slot, slot_reg);
1053 slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | 1054 slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
1054 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | 1055 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
1055 CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK | 1056 CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK |
@@ -1066,24 +1067,24 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1066 /* Installs the interrupt handler */ 1067 /* Installs the interrupt handler */
1067 rc = pci_enable_msi(pdev); 1068 rc = pci_enable_msi(pdev);
1068 if (rc) { 1069 if (rc) {
1069 info("Can't get msi for the hotplug controller\n"); 1070 ctrl_info(ctrl,
1070 info("Use INTx for the hotplug controller\n"); 1071 "Can't get msi for the hotplug controller\n");
1072 ctrl_info(ctrl,
1073 "Use INTx for the hotplug controller\n");
1071 } 1074 }
1072 1075
1073 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, 1076 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1074 MY_NAME, (void *)ctrl); 1077 MY_NAME, (void *)ctrl);
1075 dbg("%s: request_irq %d for hpc%d (returns %d)\n", 1078 ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n",
1076 __func__, ctrl->pci_dev->irq, 1079 ctrl->pci_dev->irq,
1077 atomic_read(&shpchp_num_controllers), rc); 1080 atomic_read(&shpchp_num_controllers), rc);
1078 if (rc) { 1081 if (rc) {
1079 err("Can't get irq %d for the hotplug controller\n", 1082 ctrl_err(ctrl, "Can't get irq %d for the hotplug "
1080 ctrl->pci_dev->irq); 1083 "controller\n", ctrl->pci_dev->irq);
1081 goto abort_iounmap; 1084 goto abort_iounmap;
1082 } 1085 }
1083 } 1086 }
1084 dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, 1087 ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
1085 pdev->bus->number, PCI_SLOT(pdev->devfn),
1086 PCI_FUNC(pdev->devfn), pdev->irq);
1087 1088
1088 /* 1089 /*
1089 * If this is the first controller to be initialized, 1090 * If this is the first controller to be initialized,
@@ -1102,8 +1103,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1102 */ 1103 */
1103 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 1104 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
1104 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); 1105 slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
1105 dbg("%s: Default Logical Slot Register %d value %x\n", __func__, 1106 ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
1106 hp_slot, slot_reg); 1107 hp_slot, slot_reg);
1107 slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | 1108 slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
1108 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | 1109 BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
1109 CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK); 1110 CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK);
@@ -1116,7 +1117,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1116 SERR_INTR_RSVDZ_MASK); 1117 SERR_INTR_RSVDZ_MASK);
1117 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); 1118 shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
1118 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); 1119 tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
1119 dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); 1120 ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
1120 } 1121 }
1121 1122
1122 return 0; 1123 return 0;
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 3fc4ec0eea0b..138f161becc0 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -49,9 +49,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
49 /* use default values if we can't get them from firmware */ 49 /* use default values if we can't get them from firmware */
50 if (get_hp_params_from_firmware(dev, &hpp) || 50 if (get_hp_params_from_firmware(dev, &hpp) ||
51 !hpp.t0 || (hpp.t0->revision > 1)) { 51 !hpp.t0 || (hpp.t0->revision > 1)) {
52 printk(KERN_WARNING 52 warn("Could not get hotplug parameters. Use defaults\n");
53 "%s: Could not get hotplug parameters. Use defaults\n",
54 __func__);
55 hpp.t0 = &hpp.type0_data; 53 hpp.t0 = &hpp.type0_data;
56 hpp.t0->revision = 0; 54 hpp.t0->revision = 0;
57 hpp.t0->cache_line_size = 8; 55 hpp.t0->cache_line_size = 8;
@@ -101,18 +99,20 @@ int __ref shpchp_configure_device(struct slot *p_slot)
101 struct pci_dev *dev; 99 struct pci_dev *dev;
102 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 100 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
103 int num, fn; 101 int num, fn;
102 struct controller *ctrl = p_slot->ctrl;
104 103
105 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); 104 dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
106 if (dev) { 105 if (dev) {
107 err("Device %s already exists at %x:%x, cannot hot-add\n", 106 ctrl_err(ctrl, "Device %s already exists "
108 pci_name(dev), p_slot->bus, p_slot->device); 107 "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
108 pci_domain_nr(parent), p_slot->bus, p_slot->device);
109 pci_dev_put(dev); 109 pci_dev_put(dev);
110 return -EINVAL; 110 return -EINVAL;
111 } 111 }
112 112
113 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); 113 num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
114 if (num == 0) { 114 if (num == 0) {
115 err("No new device found\n"); 115 ctrl_err(ctrl, "No new device found\n");
116 return -ENODEV; 116 return -ENODEV;
117 } 117 }
118 118
@@ -121,8 +121,8 @@ int __ref shpchp_configure_device(struct slot *p_slot)
121 if (!dev) 121 if (!dev)
122 continue; 122 continue;
123 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 123 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
124 err("Cannot hot-add display device %s\n", 124 ctrl_err(ctrl, "Cannot hot-add display device %s\n",
125 pci_name(dev)); 125 pci_name(dev));
126 pci_dev_put(dev); 126 pci_dev_put(dev);
127 continue; 127 continue;
128 } 128 }
@@ -138,14 +138,15 @@ int __ref shpchp_configure_device(struct slot *p_slot)
138 break; 138 break;
139 } 139 }
140 if (busnr >= end) { 140 if (busnr >= end) {
141 err("No free bus for hot-added bridge\n"); 141 ctrl_err(ctrl,
142 "No free bus for hot-added bridge\n");
142 pci_dev_put(dev); 143 pci_dev_put(dev);
143 continue; 144 continue;
144 } 145 }
145 child = pci_add_new_bus(parent, dev, busnr); 146 child = pci_add_new_bus(parent, dev, busnr);
146 if (!child) { 147 if (!child) {
147 err("Cannot add new bus for %s\n", 148 ctrl_err(ctrl, "Cannot add new bus for %s\n",
148 pci_name(dev)); 149 pci_name(dev));
149 pci_dev_put(dev); 150 pci_dev_put(dev);
150 continue; 151 continue;
151 } 152 }
@@ -168,8 +169,10 @@ int shpchp_unconfigure_device(struct slot *p_slot)
168 int j; 169 int j;
169 u8 bctl = 0; 170 u8 bctl = 0;
170 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 171 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
172 struct controller *ctrl = p_slot->ctrl;
171 173
172 dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, p_slot->device); 174 ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
175 __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
173 176
174 for (j=0; j<8 ; j++) { 177 for (j=0; j<8 ; j++) {
175 struct pci_dev* temp = pci_get_slot(parent, 178 struct pci_dev* temp = pci_get_slot(parent,
@@ -177,16 +180,17 @@ int shpchp_unconfigure_device(struct slot *p_slot)
177 if (!temp) 180 if (!temp)
178 continue; 181 continue;
179 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 182 if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
180 err("Cannot remove display device %s\n", 183 ctrl_err(ctrl, "Cannot remove display device %s\n",
181 pci_name(temp)); 184 pci_name(temp));
182 pci_dev_put(temp); 185 pci_dev_put(temp);
183 continue; 186 continue;
184 } 187 }
185 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 188 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
186 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); 189 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
187 if (bctl & PCI_BRIDGE_CTL_VGA) { 190 if (bctl & PCI_BRIDGE_CTL_VGA) {
188 err("Cannot remove display device %s\n", 191 ctrl_err(ctrl,
189 pci_name(temp)); 192 "Cannot remove display device %s\n",
193 pci_name(temp));
190 pci_dev_put(temp); 194 pci_dev_put(temp);
191 continue; 195 continue;
192 } 196 }
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 279c940a0039..bf7d6ce9bbb3 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
126 cfg->msg.address_hi = 0xffffffff; 126 cfg->msg.address_hi = 0xffffffff;
127 127
128 irq = create_irq(); 128 irq = create_irq();
129 if (irq < 0) { 129
130 if (irq <= 0) {
130 kfree(cfg); 131 kfree(cfg);
131 return -EBUSY; 132 return -EBUSY;
132 } 133 }
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3f7b81c065d2..a2692724b68f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
18 * Author: Ashok Raj <ashok.raj@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
21 */ 22 */
22 23
23#include <linux/init.h> 24#include <linux/init.h>
@@ -33,13 +34,15 @@
33#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
34#include <linux/mempool.h> 35#include <linux/mempool.h>
35#include <linux/timer.h> 36#include <linux/timer.h>
36#include "iova.h" 37#include <linux/iova.h>
37#include "intel-iommu.h" 38#include <linux/intel-iommu.h>
38#include <asm/proto.h> /* force_iommu in this header in x86-64*/
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40#include <asm/gart.h> 40#include <asm/iommu.h>
41#include "pci.h" 41#include "pci.h"
42 42
43#define ROOT_SIZE VTD_PAGE_SIZE
44#define CONTEXT_SIZE VTD_PAGE_SIZE
45
43#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) 46#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
44#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) 47#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
45 48
@@ -49,8 +52,6 @@
49 52
50#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 53#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
51 54
52#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
53
54#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 55#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
55 56
56 57
@@ -58,8 +59,6 @@ static void flush_unmaps_timeout(unsigned long data);
58 59
59DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); 60DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
60 61
61static struct intel_iommu *g_iommus;
62
63#define HIGH_WATER_MARK 250 62#define HIGH_WATER_MARK 250
64struct deferred_flush_tables { 63struct deferred_flush_tables {
65 int next; 64 int next;
@@ -80,7 +79,7 @@ static long list_size;
80 79
81static void domain_remove_dev_info(struct dmar_domain *domain); 80static void domain_remove_dev_info(struct dmar_domain *domain);
82 81
83static int dmar_disabled; 82int dmar_disabled;
84static int __initdata dmar_map_gfx = 1; 83static int __initdata dmar_map_gfx = 1;
85static int dmar_forcedac; 84static int dmar_forcedac;
86static int intel_iommu_strict; 85static int intel_iommu_strict;
@@ -160,7 +159,7 @@ static inline void *alloc_domain_mem(void)
160 return iommu_kmem_cache_alloc(iommu_domain_cache); 159 return iommu_kmem_cache_alloc(iommu_domain_cache);
161} 160}
162 161
163static inline void free_domain_mem(void *vaddr) 162static void free_domain_mem(void *vaddr)
164{ 163{
165 kmem_cache_free(iommu_domain_cache, vaddr); 164 kmem_cache_free(iommu_domain_cache, vaddr);
166} 165}
@@ -185,13 +184,6 @@ void free_iova_mem(struct iova *iova)
185 kmem_cache_free(iommu_iova_cache, iova); 184 kmem_cache_free(iommu_iova_cache, iova);
186} 185}
187 186
188static inline void __iommu_flush_cache(
189 struct intel_iommu *iommu, void *addr, int size)
190{
191 if (!ecap_coherent(iommu->ecap))
192 clflush_cache_range(addr, size);
193}
194
195/* Gets context entry for a given bus and devfn */ 187/* Gets context entry for a given bus and devfn */
196static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, 188static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
197 u8 bus, u8 devfn) 189 u8 bus, u8 devfn)
@@ -210,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
210 spin_unlock_irqrestore(&iommu->lock, flags); 202 spin_unlock_irqrestore(&iommu->lock, flags);
211 return NULL; 203 return NULL;
212 } 204 }
213 __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); 205 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
214 phy_addr = virt_to_phys((void *)context); 206 phy_addr = virt_to_phys((void *)context);
215 set_root_value(root, phy_addr); 207 set_root_value(root, phy_addr);
216 set_root_present(root); 208 set_root_present(root);
@@ -356,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
356 return NULL; 348 return NULL;
357 } 349 }
358 __iommu_flush_cache(domain->iommu, tmp_page, 350 __iommu_flush_cache(domain->iommu, tmp_page,
359 PAGE_SIZE_4K); 351 PAGE_SIZE);
360 dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); 352 dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
361 /* 353 /*
362 * high level table always sets r/w, last level page 354 * high level table always sets r/w, last level page
@@ -419,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
419 start &= (((u64)1) << addr_width) - 1; 411 start &= (((u64)1) << addr_width) - 1;
420 end &= (((u64)1) << addr_width) - 1; 412 end &= (((u64)1) << addr_width) - 1;
421 /* in case it's partial page */ 413 /* in case it's partial page */
422 start = PAGE_ALIGN_4K(start); 414 start = PAGE_ALIGN(start);
423 end &= PAGE_MASK_4K; 415 end &= PAGE_MASK;
424 416
425 /* we don't need lock here, nobody else touches the iova range */ 417 /* we don't need lock here, nobody else touches the iova range */
426 while (start < end) { 418 while (start < end) {
427 dma_pte_clear_one(domain, start); 419 dma_pte_clear_one(domain, start);
428 start += PAGE_SIZE_4K; 420 start += VTD_PAGE_SIZE;
429 } 421 }
430} 422}
431 423
@@ -479,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
479 if (!root) 471 if (!root)
480 return -ENOMEM; 472 return -ENOMEM;
481 473
482 __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); 474 __iommu_flush_cache(iommu, root, ROOT_SIZE);
483 475
484 spin_lock_irqsave(&iommu->lock, flags); 476 spin_lock_irqsave(&iommu->lock, flags);
485 iommu->root_entry = root; 477 iommu->root_entry = root;
@@ -488,19 +480,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
488 return 0; 480 return 0;
489} 481}
490 482
491#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
492{\
493 cycles_t start_time = get_cycles();\
494 while (1) {\
495 sts = op (iommu->reg + offset);\
496 if (cond)\
497 break;\
498 if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
499 panic("DMAR hardware is malfunctioning\n");\
500 cpu_relax();\
501 }\
502}
503
504static void iommu_set_root_entry(struct intel_iommu *iommu) 483static void iommu_set_root_entry(struct intel_iommu *iommu)
505{ 484{
506 void *addr; 485 void *addr;
@@ -587,31 +566,10 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
587 566
588 spin_unlock_irqrestore(&iommu->register_lock, flag); 567 spin_unlock_irqrestore(&iommu->register_lock, flag);
589 568
590 /* flush context entry will implictly flush write buffer */ 569 /* flush context entry will implicitly flush write buffer */
591 return 0; 570 return 0;
592} 571}
593 572
594static int inline iommu_flush_context_global(struct intel_iommu *iommu,
595 int non_present_entry_flush)
596{
597 return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
598 non_present_entry_flush);
599}
600
601static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
602 int non_present_entry_flush)
603{
604 return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
605 non_present_entry_flush);
606}
607
608static int inline iommu_flush_context_device(struct intel_iommu *iommu,
609 u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
610{
611 return __iommu_flush_context(iommu, did, source_id, function_mask,
612 DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
613}
614
615/* return value determine if we need a write buffer flush */ 573/* return value determine if we need a write buffer flush */
616static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, 574static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
617 u64 addr, unsigned int size_order, u64 type, 575 u64 addr, unsigned int size_order, u64 type,
@@ -679,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
679 printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); 637 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
680 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) 638 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
681 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", 639 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
682 DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); 640 (unsigned long long)DMA_TLB_IIRG(type),
683 /* flush context entry will implictly flush write buffer */ 641 (unsigned long long)DMA_TLB_IAIG(val));
642 /* flush iotlb entry will implicitly flush write buffer */
684 return 0; 643 return 0;
685} 644}
686 645
687static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
688 int non_present_entry_flush)
689{
690 return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
691 non_present_entry_flush);
692}
693
694static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
695 int non_present_entry_flush)
696{
697 return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
698 non_present_entry_flush);
699}
700
701static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 646static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
702 u64 addr, unsigned int pages, int non_present_entry_flush) 647 u64 addr, unsigned int pages, int non_present_entry_flush)
703{ 648{
704 unsigned int mask; 649 unsigned int mask;
705 650
706 BUG_ON(addr & (~PAGE_MASK_4K)); 651 BUG_ON(addr & (~VTD_PAGE_MASK));
707 BUG_ON(pages == 0); 652 BUG_ON(pages == 0);
708 653
709 /* Fallback to domain selective flush if no PSI support */ 654 /* Fallback to domain selective flush if no PSI support */
710 if (!cap_pgsel_inv(iommu->cap)) 655 if (!cap_pgsel_inv(iommu->cap))
711 return iommu_flush_iotlb_dsi(iommu, did, 656 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
712 non_present_entry_flush); 657 DMA_TLB_DSI_FLUSH,
658 non_present_entry_flush);
713 659
714 /* 660 /*
715 * PSI requires page size to be 2 ^ x, and the base address is naturally 661 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -718,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
718 mask = ilog2(__roundup_pow_of_two(pages)); 664 mask = ilog2(__roundup_pow_of_two(pages));
719 /* Fallback to domain selective flush if size is too big */ 665 /* Fallback to domain selective flush if size is too big */
720 if (mask > cap_max_amask_val(iommu->cap)) 666 if (mask > cap_max_amask_val(iommu->cap))
721 return iommu_flush_iotlb_dsi(iommu, did, 667 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
722 non_present_entry_flush); 668 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
723 669
724 return __iommu_flush_iotlb(iommu, did, addr, mask, 670 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
725 DMA_TLB_PSI_FLUSH, non_present_entry_flush); 671 DMA_TLB_PSI_FLUSH,
672 non_present_entry_flush);
726} 673}
727 674
728static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) 675static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -855,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
855} 802}
856 803
857static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, 804static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
858 u8 fault_reason, u16 source_id, u64 addr) 805 u8 fault_reason, u16 source_id, unsigned long long addr)
859{ 806{
860 const char *reason; 807 const char *reason;
861 808
@@ -990,6 +937,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
990 return -ENOMEM; 937 return -ENOMEM;
991 } 938 }
992 939
940 spin_lock_init(&iommu->lock);
941
993 /* 942 /*
994 * if Caching mode is set, then invalid translations are tagged 943 * if Caching mode is set, then invalid translations are tagged
995 * with domainid 0. Hence we need to pre-allocate it. 944 * with domainid 0. Hence we need to pre-allocate it.
@@ -998,62 +947,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
998 set_bit(0, iommu->domain_ids); 947 set_bit(0, iommu->domain_ids);
999 return 0; 948 return 0;
1000} 949}
1001static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
1002 struct dmar_drhd_unit *drhd)
1003{
1004 int ret;
1005 int map_size;
1006 u32 ver;
1007 950
1008 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
1009 if (!iommu->reg) {
1010 printk(KERN_ERR "IOMMU: can't map the region\n");
1011 goto error;
1012 }
1013 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
1014 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
1015
1016 /* the registers might be more than one page */
1017 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
1018 cap_max_fault_reg_offset(iommu->cap));
1019 map_size = PAGE_ALIGN_4K(map_size);
1020 if (map_size > PAGE_SIZE_4K) {
1021 iounmap(iommu->reg);
1022 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
1023 if (!iommu->reg) {
1024 printk(KERN_ERR "IOMMU: can't map the region\n");
1025 goto error;
1026 }
1027 }
1028
1029 ver = readl(iommu->reg + DMAR_VER_REG);
1030 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
1031 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1032 iommu->cap, iommu->ecap);
1033 ret = iommu_init_domains(iommu);
1034 if (ret)
1035 goto error_unmap;
1036 spin_lock_init(&iommu->lock);
1037 spin_lock_init(&iommu->register_lock);
1038
1039 drhd->iommu = iommu;
1040 return iommu;
1041error_unmap:
1042 iounmap(iommu->reg);
1043error:
1044 kfree(iommu);
1045 return NULL;
1046}
1047 951
1048static void domain_exit(struct dmar_domain *domain); 952static void domain_exit(struct dmar_domain *domain);
1049static void free_iommu(struct intel_iommu *iommu) 953
954void free_dmar_iommu(struct intel_iommu *iommu)
1050{ 955{
1051 struct dmar_domain *domain; 956 struct dmar_domain *domain;
1052 int i; 957 int i;
1053 958
1054 if (!iommu)
1055 return;
1056
1057 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); 959 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1058 for (; i < cap_ndoms(iommu->cap); ) { 960 for (; i < cap_ndoms(iommu->cap); ) {
1059 domain = iommu->domains[i]; 961 domain = iommu->domains[i];
@@ -1078,10 +980,6 @@ static void free_iommu(struct intel_iommu *iommu)
1078 980
1079 /* free context mapping */ 981 /* free context mapping */
1080 free_context_table(iommu); 982 free_context_table(iommu);
1081
1082 if (iommu->reg)
1083 iounmap(iommu->reg);
1084 kfree(iommu);
1085} 983}
1086 984
1087static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) 985static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
@@ -1157,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
1157 if (!r->flags || !(r->flags & IORESOURCE_MEM)) 1055 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1158 continue; 1056 continue;
1159 addr = r->start; 1057 addr = r->start;
1160 addr &= PAGE_MASK_4K; 1058 addr &= PAGE_MASK;
1161 size = r->end - addr; 1059 size = r->end - addr;
1162 size = PAGE_ALIGN_4K(size); 1060 size = PAGE_ALIGN(size);
1163 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), 1061 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1164 IOVA_PFN(size + addr) - 1); 1062 IOVA_PFN(size + addr) - 1);
1165 if (!iova) 1063 if (!iova)
@@ -1221,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1221 domain->pgd = (struct dma_pte *)alloc_pgtable_page(); 1119 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1222 if (!domain->pgd) 1120 if (!domain->pgd)
1223 return -ENOMEM; 1121 return -ENOMEM;
1224 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); 1122 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1225 return 0; 1123 return 0;
1226} 1124}
1227 1125
@@ -1237,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
1237 /* destroy iovas */ 1135 /* destroy iovas */
1238 put_iova_domain(&domain->iovad); 1136 put_iova_domain(&domain->iovad);
1239 end = DOMAIN_MAX_ADDR(domain->gaw); 1137 end = DOMAIN_MAX_ADDR(domain->gaw);
1240 end = end & (~PAGE_MASK_4K); 1138 end = end & (~PAGE_MASK);
1241 1139
1242 /* clear ptes */ 1140 /* clear ptes */
1243 dma_pte_clear_range(domain, 0, end); 1141 dma_pte_clear_range(domain, 0, end);
@@ -1277,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1277 __iommu_flush_cache(iommu, context, sizeof(*context)); 1175 __iommu_flush_cache(iommu, context, sizeof(*context));
1278 1176
1279 /* it's a non-present to present mapping */ 1177 /* it's a non-present to present mapping */
1280 if (iommu_flush_context_device(iommu, domain->id, 1178 if (iommu->flush.flush_context(iommu, domain->id,
1281 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) 1179 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1180 DMA_CCMD_DEVICE_INVL, 1))
1282 iommu_flush_write_buffer(iommu); 1181 iommu_flush_write_buffer(iommu);
1283 else 1182 else
1284 iommu_flush_iotlb_dsi(iommu, 0, 0); 1183 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1184
1285 spin_unlock_irqrestore(&iommu->lock, flags); 1185 spin_unlock_irqrestore(&iommu->lock, flags);
1286 return 0; 1186 return 0;
1287} 1187}
@@ -1356,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1356 u64 start_pfn, end_pfn; 1256 u64 start_pfn, end_pfn;
1357 struct dma_pte *pte; 1257 struct dma_pte *pte;
1358 int index; 1258 int index;
1259 int addr_width = agaw_to_width(domain->agaw);
1260
1261 hpa &= (((u64)1) << addr_width) - 1;
1359 1262
1360 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) 1263 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1361 return -EINVAL; 1264 return -EINVAL;
1362 iova &= PAGE_MASK_4K; 1265 iova &= PAGE_MASK;
1363 start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; 1266 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1364 end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; 1267 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1365 index = 0; 1268 index = 0;
1366 while (start_pfn < end_pfn) { 1269 while (start_pfn < end_pfn) {
1367 pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); 1270 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1368 if (!pte) 1271 if (!pte)
1369 return -ENOMEM; 1272 return -ENOMEM;
1370 /* We don't need lock here, nobody else 1273 /* We don't need lock here, nobody else
1371 * touches the iova range 1274 * touches the iova range
1372 */ 1275 */
1373 BUG_ON(dma_pte_addr(*pte)); 1276 BUG_ON(dma_pte_addr(*pte));
1374 dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); 1277 dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
1375 dma_set_pte_prot(*pte, prot); 1278 dma_set_pte_prot(*pte, prot);
1376 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); 1279 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
1377 start_pfn++; 1280 start_pfn++;
@@ -1383,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1383static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) 1286static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
1384{ 1287{
1385 clear_context_table(domain->iommu, bus, devfn); 1288 clear_context_table(domain->iommu, bus, devfn);
1386 iommu_flush_context_global(domain->iommu, 0); 1289 domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
1387 iommu_flush_iotlb_global(domain->iommu, 0); 1290 DMA_CCMD_GLOBAL_INVL, 0);
1291 domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
1292 DMA_TLB_GLOBAL_FLUSH, 0);
1388} 1293}
1389 1294
1390static void domain_remove_dev_info(struct dmar_domain *domain) 1295static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1414,7 +1319,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
1414 * find_domain 1319 * find_domain
1415 * Note: we use struct pci_dev->dev.archdata.iommu stores the info 1320 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
1416 */ 1321 */
1417struct dmar_domain * 1322static struct dmar_domain *
1418find_domain(struct pci_dev *pdev) 1323find_domain(struct pci_dev *pdev)
1419{ 1324{
1420 struct device_domain_info *info; 1325 struct device_domain_info *info;
@@ -1426,37 +1331,6 @@ find_domain(struct pci_dev *pdev)
1426 return NULL; 1331 return NULL;
1427} 1332}
1428 1333
1429static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
1430 struct pci_dev *dev)
1431{
1432 int index;
1433
1434 while (dev) {
1435 for (index = 0; index < cnt; index++)
1436 if (dev == devices[index])
1437 return 1;
1438
1439 /* Check our parent */
1440 dev = dev->bus->self;
1441 }
1442
1443 return 0;
1444}
1445
1446static struct dmar_drhd_unit *
1447dmar_find_matched_drhd_unit(struct pci_dev *dev)
1448{
1449 struct dmar_drhd_unit *drhd = NULL;
1450
1451 list_for_each_entry(drhd, &dmar_drhd_units, list) {
1452 if (drhd->include_all || dmar_pci_device_match(drhd->devices,
1453 drhd->devices_cnt, dev))
1454 return drhd;
1455 }
1456
1457 return NULL;
1458}
1459
1460/* domain is initialized */ 1334/* domain is initialized */
1461static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) 1335static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1462{ 1336{
@@ -1578,11 +1452,13 @@ error:
1578 return find_domain(pdev); 1452 return find_domain(pdev);
1579} 1453}
1580 1454
1581static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) 1455static int iommu_prepare_identity_map(struct pci_dev *pdev,
1456 unsigned long long start,
1457 unsigned long long end)
1582{ 1458{
1583 struct dmar_domain *domain; 1459 struct dmar_domain *domain;
1584 unsigned long size; 1460 unsigned long size;
1585 u64 base; 1461 unsigned long long base;
1586 int ret; 1462 int ret;
1587 1463
1588 printk(KERN_INFO 1464 printk(KERN_INFO
@@ -1594,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
1594 return -ENOMEM; 1470 return -ENOMEM;
1595 1471
1596 /* The address might not be aligned */ 1472 /* The address might not be aligned */
1597 base = start & PAGE_MASK_4K; 1473 base = start & PAGE_MASK;
1598 size = end - base; 1474 size = end - base;
1599 size = PAGE_ALIGN_4K(size); 1475 size = PAGE_ALIGN(size);
1600 if (!reserve_iova(&domain->iovad, IOVA_PFN(base), 1476 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1601 IOVA_PFN(base + size) - 1)) { 1477 IOVA_PFN(base + size) - 1)) {
1602 printk(KERN_ERR "IOMMU: reserve iova failed\n"); 1478 printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1729,8 +1605,6 @@ int __init init_dmars(void)
1729 * endfor 1605 * endfor
1730 */ 1606 */
1731 for_each_drhd_unit(drhd) { 1607 for_each_drhd_unit(drhd) {
1732 if (drhd->ignored)
1733 continue;
1734 g_num_of_iommus++; 1608 g_num_of_iommus++;
1735 /* 1609 /*
1736 * lock not needed as this is only incremented in the single 1610 * lock not needed as this is only incremented in the single
@@ -1739,12 +1613,6 @@ int __init init_dmars(void)
1739 */ 1613 */
1740 } 1614 }
1741 1615
1742 g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
1743 if (!g_iommus) {
1744 ret = -ENOMEM;
1745 goto error;
1746 }
1747
1748 deferred_flush = kzalloc(g_num_of_iommus * 1616 deferred_flush = kzalloc(g_num_of_iommus *
1749 sizeof(struct deferred_flush_tables), GFP_KERNEL); 1617 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1750 if (!deferred_flush) { 1618 if (!deferred_flush) {
@@ -1752,16 +1620,15 @@ int __init init_dmars(void)
1752 goto error; 1620 goto error;
1753 } 1621 }
1754 1622
1755 i = 0;
1756 for_each_drhd_unit(drhd) { 1623 for_each_drhd_unit(drhd) {
1757 if (drhd->ignored) 1624 if (drhd->ignored)
1758 continue; 1625 continue;
1759 iommu = alloc_iommu(&g_iommus[i], drhd); 1626
1760 i++; 1627 iommu = drhd->iommu;
1761 if (!iommu) { 1628
1762 ret = -ENOMEM; 1629 ret = iommu_init_domains(iommu);
1630 if (ret)
1763 goto error; 1631 goto error;
1764 }
1765 1632
1766 /* 1633 /*
1767 * TBD: 1634 * TBD:
@@ -1775,6 +1642,28 @@ int __init init_dmars(void)
1775 } 1642 }
1776 } 1643 }
1777 1644
1645 for_each_drhd_unit(drhd) {
1646 if (drhd->ignored)
1647 continue;
1648
1649 iommu = drhd->iommu;
1650 if (dmar_enable_qi(iommu)) {
1651 /*
1652 * Queued Invalidate not enabled, use Register Based
1653 * Invalidate
1654 */
1655 iommu->flush.flush_context = __iommu_flush_context;
1656 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1657 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
1658 "invalidation\n", drhd->reg_base_addr);
1659 } else {
1660 iommu->flush.flush_context = qi_flush_context;
1661 iommu->flush.flush_iotlb = qi_flush_iotlb;
1662 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
1663 "invalidation\n", drhd->reg_base_addr);
1664 }
1665 }
1666
1778 /* 1667 /*
1779 * For each rmrr 1668 * For each rmrr
1780 * for each dev attached to rmrr 1669 * for each dev attached to rmrr
@@ -1827,9 +1716,10 @@ int __init init_dmars(void)
1827 1716
1828 iommu_set_root_entry(iommu); 1717 iommu_set_root_entry(iommu);
1829 1718
1830 iommu_flush_context_global(iommu, 0); 1719 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
1831 iommu_flush_iotlb_global(iommu, 0); 1720 0);
1832 1721 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
1722 0);
1833 iommu_disable_protect_mem_regions(iommu); 1723 iommu_disable_protect_mem_regions(iommu);
1834 1724
1835 ret = iommu_enable_translation(iommu); 1725 ret = iommu_enable_translation(iommu);
@@ -1845,15 +1735,14 @@ error:
1845 iommu = drhd->iommu; 1735 iommu = drhd->iommu;
1846 free_iommu(iommu); 1736 free_iommu(iommu);
1847 } 1737 }
1848 kfree(g_iommus);
1849 return ret; 1738 return ret;
1850} 1739}
1851 1740
1852static inline u64 aligned_size(u64 host_addr, size_t size) 1741static inline u64 aligned_size(u64 host_addr, size_t size)
1853{ 1742{
1854 u64 addr; 1743 u64 addr;
1855 addr = (host_addr & (~PAGE_MASK_4K)) + size; 1744 addr = (host_addr & (~PAGE_MASK)) + size;
1856 return PAGE_ALIGN_4K(addr); 1745 return PAGE_ALIGN(addr);
1857} 1746}
1858 1747
1859struct iova * 1748struct iova *
@@ -1867,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
1867 return NULL; 1756 return NULL;
1868 1757
1869 piova = alloc_iova(&domain->iovad, 1758 piova = alloc_iova(&domain->iovad,
1870 size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); 1759 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
1871 return piova; 1760 return piova;
1872} 1761}
1873 1762
1874static struct iova * 1763static struct iova *
1875__intel_alloc_iova(struct device *dev, struct dmar_domain *domain, 1764__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1876 size_t size) 1765 size_t size, u64 dma_mask)
1877{ 1766{
1878 struct pci_dev *pdev = to_pci_dev(dev); 1767 struct pci_dev *pdev = to_pci_dev(dev);
1879 struct iova *iova = NULL; 1768 struct iova *iova = NULL;
1880 1769
1881 if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { 1770 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
1882 iova = iommu_alloc_iova(domain, size, pdev->dma_mask); 1771 iova = iommu_alloc_iova(domain, size, dma_mask);
1883 } else { 1772 else {
1884 /* 1773 /*
1885 * First try to allocate an io virtual address in 1774 * First try to allocate an io virtual address in
1886 * DMA_32BIT_MASK and if that fails then try allocating 1775 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1888,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1888 */ 1777 */
1889 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); 1778 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
1890 if (!iova) 1779 if (!iova)
1891 iova = iommu_alloc_iova(domain, size, pdev->dma_mask); 1780 iova = iommu_alloc_iova(domain, size, dma_mask);
1892 } 1781 }
1893 1782
1894 if (!iova) { 1783 if (!iova) {
@@ -1927,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
1927 return domain; 1816 return domain;
1928} 1817}
1929 1818
1930static dma_addr_t 1819static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
1931intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) 1820 size_t size, int dir, u64 dma_mask)
1932{ 1821{
1933 struct pci_dev *pdev = to_pci_dev(hwdev); 1822 struct pci_dev *pdev = to_pci_dev(hwdev);
1934 struct dmar_domain *domain; 1823 struct dmar_domain *domain;
1935 unsigned long start_paddr; 1824 phys_addr_t start_paddr;
1936 struct iova *iova; 1825 struct iova *iova;
1937 int prot = 0; 1826 int prot = 0;
1938 int ret; 1827 int ret;
@@ -1947,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
1947 1836
1948 size = aligned_size((u64)paddr, size); 1837 size = aligned_size((u64)paddr, size);
1949 1838
1950 iova = __intel_alloc_iova(hwdev, domain, size); 1839 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
1951 if (!iova) 1840 if (!iova)
1952 goto error; 1841 goto error;
1953 1842
1954 start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; 1843 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
1955 1844
1956 /* 1845 /*
1957 * Check if DMAR supports zero-length reads on write only 1846 * Check if DMAR supports zero-length reads on write only
@@ -1969,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
1969 * is not a big problem 1858 * is not a big problem
1970 */ 1859 */
1971 ret = domain_page_mapping(domain, start_paddr, 1860 ret = domain_page_mapping(domain, start_paddr,
1972 ((u64)paddr) & PAGE_MASK_4K, size, prot); 1861 ((u64)paddr) & PAGE_MASK, size, prot);
1973 if (ret) 1862 if (ret)
1974 goto error; 1863 goto error;
1975 1864
1976 pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
1977 pci_name(pdev), size, (u64)paddr,
1978 size, (u64)start_paddr, dir);
1979
1980 /* it's a non-present to present mapping */ 1865 /* it's a non-present to present mapping */
1981 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, 1866 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
1982 start_paddr, size >> PAGE_SHIFT_4K, 1); 1867 start_paddr, size >> VTD_PAGE_SHIFT, 1);
1983 if (ret) 1868 if (ret)
1984 iommu_flush_write_buffer(domain->iommu); 1869 iommu_flush_write_buffer(domain->iommu);
1985 1870
1986 return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); 1871 return start_paddr + ((u64)paddr & (~PAGE_MASK));
1987 1872
1988error: 1873error:
1989 if (iova) 1874 if (iova)
1990 __free_iova(&domain->iovad, iova); 1875 __free_iova(&domain->iovad, iova);
1991 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", 1876 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
1992 pci_name(pdev), size, (u64)paddr, dir); 1877 pci_name(pdev), size, (unsigned long long)paddr, dir);
1993 return 0; 1878 return 0;
1994} 1879}
1995 1880
1881dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
1882 size_t size, int dir)
1883{
1884 return __intel_map_single(hwdev, paddr, size, dir,
1885 to_pci_dev(hwdev)->dma_mask);
1886}
1887
1996static void flush_unmaps(void) 1888static void flush_unmaps(void)
1997{ 1889{
1998 int i, j; 1890 int i, j;
@@ -2002,7 +1894,11 @@ static void flush_unmaps(void)
2002 /* just flush them all */ 1894 /* just flush them all */
2003 for (i = 0; i < g_num_of_iommus; i++) { 1895 for (i = 0; i < g_num_of_iommus; i++) {
2004 if (deferred_flush[i].next) { 1896 if (deferred_flush[i].next) {
2005 iommu_flush_iotlb_global(&g_iommus[i], 0); 1897 struct intel_iommu *iommu =
1898 deferred_flush[i].domain[0]->iommu;
1899
1900 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1901 DMA_TLB_GLOBAL_FLUSH, 0);
2006 for (j = 0; j < deferred_flush[i].next; j++) { 1902 for (j = 0; j < deferred_flush[i].next; j++) {
2007 __free_iova(&deferred_flush[i].domain[j]->iovad, 1903 __free_iova(&deferred_flush[i].domain[j]->iovad,
2008 deferred_flush[i].iova[j]); 1904 deferred_flush[i].iova[j]);
@@ -2032,7 +1928,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2032 if (list_size == HIGH_WATER_MARK) 1928 if (list_size == HIGH_WATER_MARK)
2033 flush_unmaps(); 1929 flush_unmaps();
2034 1930
2035 iommu_id = dom->iommu - g_iommus; 1931 iommu_id = dom->iommu->seq_id;
1932
2036 next = deferred_flush[iommu_id].next; 1933 next = deferred_flush[iommu_id].next;
2037 deferred_flush[iommu_id].domain[next] = dom; 1934 deferred_flush[iommu_id].domain[next] = dom;
2038 deferred_flush[iommu_id].iova[next] = iova; 1935 deferred_flush[iommu_id].iova[next] = iova;
@@ -2046,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2046 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 1943 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2047} 1944}
2048 1945
2049static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, 1946void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2050 size_t size, int dir) 1947 int dir)
2051{ 1948{
2052 struct pci_dev *pdev = to_pci_dev(dev); 1949 struct pci_dev *pdev = to_pci_dev(dev);
2053 struct dmar_domain *domain; 1950 struct dmar_domain *domain;
@@ -2063,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
2063 if (!iova) 1960 if (!iova)
2064 return; 1961 return;
2065 1962
2066 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 1963 start_addr = iova->pfn_lo << PAGE_SHIFT;
2067 size = aligned_size((u64)dev_addr, size); 1964 size = aligned_size((u64)dev_addr, size);
2068 1965
2069 pr_debug("Device %s unmapping: %lx@%llx\n", 1966 pr_debug("Device %s unmapping: %lx@%llx\n",
2070 pci_name(pdev), size, (u64)start_addr); 1967 pci_name(pdev), size, (unsigned long long)start_addr);
2071 1968
2072 /* clear the whole page */ 1969 /* clear the whole page */
2073 dma_pte_clear_range(domain, start_addr, start_addr + size); 1970 dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2075,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
2075 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 1972 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2076 if (intel_iommu_strict) { 1973 if (intel_iommu_strict) {
2077 if (iommu_flush_iotlb_psi(domain->iommu, 1974 if (iommu_flush_iotlb_psi(domain->iommu,
2078 domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) 1975 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
2079 iommu_flush_write_buffer(domain->iommu); 1976 iommu_flush_write_buffer(domain->iommu);
2080 /* free iova */ 1977 /* free iova */
2081 __free_iova(&domain->iovad, iova); 1978 __free_iova(&domain->iovad, iova);
@@ -2088,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
2088 } 1985 }
2089} 1986}
2090 1987
2091static void * intel_alloc_coherent(struct device *hwdev, size_t size, 1988void *intel_alloc_coherent(struct device *hwdev, size_t size,
2092 dma_addr_t *dma_handle, gfp_t flags) 1989 dma_addr_t *dma_handle, gfp_t flags)
2093{ 1990{
2094 void *vaddr; 1991 void *vaddr;
2095 int order; 1992 int order;
2096 1993
2097 size = PAGE_ALIGN_4K(size); 1994 size = PAGE_ALIGN(size);
2098 order = get_order(size); 1995 order = get_order(size);
2099 flags &= ~(GFP_DMA | GFP_DMA32); 1996 flags &= ~(GFP_DMA | GFP_DMA32);
2100 1997
@@ -2103,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
2103 return NULL; 2000 return NULL;
2104 memset(vaddr, 0, size); 2001 memset(vaddr, 0, size);
2105 2002
2106 *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); 2003 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2004 DMA_BIDIRECTIONAL,
2005 hwdev->coherent_dma_mask);
2107 if (*dma_handle) 2006 if (*dma_handle)
2108 return vaddr; 2007 return vaddr;
2109 free_pages((unsigned long)vaddr, order); 2008 free_pages((unsigned long)vaddr, order);
2110 return NULL; 2009 return NULL;
2111} 2010}
2112 2011
2113static void intel_free_coherent(struct device *hwdev, size_t size, 2012void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2114 void *vaddr, dma_addr_t dma_handle) 2013 dma_addr_t dma_handle)
2115{ 2014{
2116 int order; 2015 int order;
2117 2016
2118 size = PAGE_ALIGN_4K(size); 2017 size = PAGE_ALIGN(size);
2119 order = get_order(size); 2018 order = get_order(size);
2120 2019
2121 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); 2020 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2123,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
2123} 2022}
2124 2023
2125#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) 2024#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
2126static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, 2025
2127 int nelems, int dir) 2026void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2027 int nelems, int dir)
2128{ 2028{
2129 int i; 2029 int i;
2130 struct pci_dev *pdev = to_pci_dev(hwdev); 2030 struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2148,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2148 size += aligned_size((u64)addr, sg->length); 2048 size += aligned_size((u64)addr, sg->length);
2149 } 2049 }
2150 2050
2151 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2051 start_addr = iova->pfn_lo << PAGE_SHIFT;
2152 2052
2153 /* clear the whole page */ 2053 /* clear the whole page */
2154 dma_pte_clear_range(domain, start_addr, start_addr + size); 2054 dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2156,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2156 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2056 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2157 2057
2158 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, 2058 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
2159 size >> PAGE_SHIFT_4K, 0)) 2059 size >> VTD_PAGE_SHIFT, 0))
2160 iommu_flush_write_buffer(domain->iommu); 2060 iommu_flush_write_buffer(domain->iommu);
2161 2061
2162 /* free iova */ 2062 /* free iova */
@@ -2177,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2177 return nelems; 2077 return nelems;
2178} 2078}
2179 2079
2180static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, 2080int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2181 int nelems, int dir) 2081 int dir)
2182{ 2082{
2183 void *addr; 2083 void *addr;
2184 int i; 2084 int i;
@@ -2206,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2206 size += aligned_size((u64)addr, sg->length); 2106 size += aligned_size((u64)addr, sg->length);
2207 } 2107 }
2208 2108
2209 iova = __intel_alloc_iova(hwdev, domain, size); 2109 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2210 if (!iova) { 2110 if (!iova) {
2211 sglist->dma_length = 0; 2111 sglist->dma_length = 0;
2212 return 0; 2112 return 0;
@@ -2222,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2222 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 2122 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2223 prot |= DMA_PTE_WRITE; 2123 prot |= DMA_PTE_WRITE;
2224 2124
2225 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2125 start_addr = iova->pfn_lo << PAGE_SHIFT;
2226 offset = 0; 2126 offset = 0;
2227 for_each_sg(sglist, sg, nelems, i) { 2127 for_each_sg(sglist, sg, nelems, i) {
2228 addr = SG_ENT_VIRT_ADDRESS(sg); 2128 addr = SG_ENT_VIRT_ADDRESS(sg);
2229 addr = (void *)virt_to_phys(addr); 2129 addr = (void *)virt_to_phys(addr);
2230 size = aligned_size((u64)addr, sg->length); 2130 size = aligned_size((u64)addr, sg->length);
2231 ret = domain_page_mapping(domain, start_addr + offset, 2131 ret = domain_page_mapping(domain, start_addr + offset,
2232 ((u64)addr) & PAGE_MASK_4K, 2132 ((u64)addr) & PAGE_MASK,
2233 size, prot); 2133 size, prot);
2234 if (ret) { 2134 if (ret) {
2235 /* clear the page */ 2135 /* clear the page */
@@ -2243,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2243 return 0; 2143 return 0;
2244 } 2144 }
2245 sg->dma_address = start_addr + offset + 2145 sg->dma_address = start_addr + offset +
2246 ((u64)addr & (~PAGE_MASK_4K)); 2146 ((u64)addr & (~PAGE_MASK));
2247 sg->dma_length = sg->length; 2147 sg->dma_length = sg->length;
2248 offset += size; 2148 offset += size;
2249 } 2149 }
2250 2150
2251 /* it's a non-present to present mapping */ 2151 /* it's a non-present to present mapping */
2252 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, 2152 if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
2253 start_addr, offset >> PAGE_SHIFT_4K, 1)) 2153 start_addr, offset >> VTD_PAGE_SHIFT, 1))
2254 iommu_flush_write_buffer(domain->iommu); 2154 iommu_flush_write_buffer(domain->iommu);
2255 return nelems; 2155 return nelems;
2256} 2156}
@@ -2290,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void)
2290 sizeof(struct device_domain_info), 2190 sizeof(struct device_domain_info),
2291 0, 2191 0,
2292 SLAB_HWCACHE_ALIGN, 2192 SLAB_HWCACHE_ALIGN,
2293
2294 NULL); 2193 NULL);
2295 if (!iommu_devinfo_cache) { 2194 if (!iommu_devinfo_cache) {
2296 printk(KERN_ERR "Couldn't create devinfo cache\n"); 2195 printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2308,7 +2207,6 @@ static inline int iommu_iova_cache_init(void)
2308 sizeof(struct iova), 2207 sizeof(struct iova),
2309 0, 2208 0,
2310 SLAB_HWCACHE_ALIGN, 2209 SLAB_HWCACHE_ALIGN,
2311
2312 NULL); 2210 NULL);
2313 if (!iommu_iova_cache) { 2211 if (!iommu_iova_cache) {
2314 printk(KERN_ERR "Couldn't create iova cache\n"); 2212 printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2348,15 +2246,6 @@ static void __init iommu_exit_mempool(void)
2348 2246
2349} 2247}
2350 2248
2351void __init detect_intel_iommu(void)
2352{
2353 if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
2354 return;
2355 if (early_dmar_detect()) {
2356 iommu_detected = 1;
2357 }
2358}
2359
2360static void __init init_no_remapping_devices(void) 2249static void __init init_no_remapping_devices(void)
2361{ 2250{
2362 struct dmar_drhd_unit *drhd; 2251 struct dmar_drhd_unit *drhd;
@@ -2403,12 +2292,19 @@ int __init intel_iommu_init(void)
2403{ 2292{
2404 int ret = 0; 2293 int ret = 0;
2405 2294
2406 if (no_iommu || swiotlb || dmar_disabled)
2407 return -ENODEV;
2408
2409 if (dmar_table_init()) 2295 if (dmar_table_init())
2410 return -ENODEV; 2296 return -ENODEV;
2411 2297
2298 if (dmar_dev_scope_init())
2299 return -ENODEV;
2300
2301 /*
2302 * Check the need for DMA-remapping initialization now.
2303 * Above initialization will also be used by Interrupt-remapping.
2304 */
2305 if (no_iommu || swiotlb || dmar_disabled)
2306 return -ENODEV;
2307
2412 iommu_init_mempool(); 2308 iommu_init_mempool();
2413 dmar_init_reserved_ranges(); 2309 dmar_init_reserved_ranges();
2414 2310
@@ -2430,3 +2326,111 @@ int __init intel_iommu_init(void)
2430 return 0; 2326 return 0;
2431} 2327}
2432 2328
/*
 * intel_iommu_domain_exit - tear down a DMA-remapping domain.
 * @domain: domain to destroy; NULL is tolerated (domain 0 is reserved).
 *
 * Clears and frees the domain's page tables, releases its domain id on
 * the owning iommu, then frees the domain structure itself.
 */
void intel_iommu_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	end = DOMAIN_MAX_ADDR(domain->gaw);
	/*
	 * NOTE(review): &~VTD_PAGE_MASK keeps only the sub-page bits of the
	 * max address — verify against the DOMAIN_MAX_ADDR definition that
	 * this yields the intended page-aligned end of the address space.
	 */
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
2350
/*
 * intel_iommu_domain_alloc - allocate a DMA-remapping domain for @pdev.
 *
 * Looks up the DRHD unit covering the device, allocates a domain id on
 * that unit's iommu and initializes the domain's page-table structures
 * with the default address width.
 *
 * Returns the new domain, or NULL on any failure (no matching DRHD,
 * missing iommu, allocation or init failure); errors are logged.
 */
struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;

	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
		return NULL;
	}

	iommu = drhd->iommu;
	if (!iommu) {
		printk(KERN_ERR
			"intel_iommu_domain_alloc: iommu == NULL\n");
		return NULL;
	}
	domain = iommu_alloc_domain(iommu);
	if (!domain) {
		printk(KERN_ERR
			"intel_iommu_domain_alloc: domain == NULL\n");
		return NULL;
	}
	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_alloc: domain_init() failed\n");
		/* undoes the iommu_alloc_domain() above as well */
		intel_iommu_domain_exit(domain);
		return NULL;
	}
	return domain;
}
EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
2384
/*
 * intel_iommu_context_mapping - install the context-table entry that
 * attaches @pdev to @domain.
 *
 * Thin exported wrapper around domain_context_mapping(); returns its
 * result unchanged.
 */
int intel_iommu_context_mapping(
	struct dmar_domain *domain, struct pci_dev *pdev)
{
	return domain_context_mapping(domain, pdev);
}
EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
2393
2394int intel_iommu_page_mapping(
2395 struct dmar_domain *domain, dma_addr_t iova,
2396 u64 hpa, size_t size, int prot)
2397{
2398 int rc;
2399 rc = domain_page_mapping(domain, iova, hpa, size, prot);
2400 return rc;
2401}
2402EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
2403
/*
 * intel_iommu_detach_dev - detach the device at @bus/@devfn from
 * @domain, removing its context-table entry.
 */
void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
{
	detach_domain_for_dev(domain, bus, devfn);
}
EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
2409
/*
 * intel_iommu_find_domain - return the domain @pdev is currently
 * attached to, or NULL if it has none.
 */
struct dmar_domain *
intel_iommu_find_domain(struct pci_dev *pdev)
{
	return find_domain(pdev);
}
EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
2416
/*
 * intel_iommu_found - report whether any Intel IOMMU units were
 * detected; returns the count of units (nonzero means present).
 */
int intel_iommu_found(void)
{
	return g_num_of_iommus;
}
EXPORT_SYMBOL_GPL(intel_iommu_found);
2422
2423u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
2424{
2425 struct dma_pte *pte;
2426 u64 pfn;
2427
2428 pfn = 0;
2429 pte = addr_to_dma_pte(domain, iova);
2430
2431 if (pte)
2432 pfn = dma_pte_addr(*pte);
2433
2434 return pfn >> VTD_PAGE_SHIFT;
2435}
2436EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
deleted file mode 100644
index afc0ad96122e..000000000000
--- a/drivers/pci/intel-iommu.h
+++ /dev/null
@@ -1,344 +0,0 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 */
21
22#ifndef _INTEL_IOMMU_H_
23#define _INTEL_IOMMU_H_
24
25#include <linux/types.h>
26#include <linux/msi.h>
27#include <linux/sysdev.h>
28#include "iova.h"
29#include <linux/io.h>
30
31/*
32 * We need a fixed PAGE_SIZE of 4K irrespective of
33 * arch PAGE_SIZE for IOMMU page tables.
34 */
35#define PAGE_SHIFT_4K (12)
36#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
37#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
38#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
39
40#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
41#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
42#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
43
44/*
45 * Intel IOMMU register specification per version 1.0 public spec.
46 */
47
48#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
49#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
50#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
51#define DMAR_GCMD_REG 0x18 /* Global command register */
52#define DMAR_GSTS_REG 0x1c /* Global status register */
53#define DMAR_RTADDR_REG 0x20 /* Root entry table */
54#define DMAR_CCMD_REG 0x28 /* Context command reg */
55#define DMAR_FSTS_REG 0x34 /* Fault Status register */
56#define DMAR_FECTL_REG 0x38 /* Fault control register */
57#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
58#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
59#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
60#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
61#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
62#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
63#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
64#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
65#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
66
67#define OFFSET_STRIDE (9)
68/*
69#define dmar_readl(dmar, reg) readl(dmar + reg)
70#define dmar_readq(dmar, reg) ({ \
71 u32 lo, hi; \
72 lo = readl(dmar + reg); \
73 hi = readl(dmar + reg + 4); \
74 (((u64) hi) << 32) + lo; })
75*/
76static inline u64 dmar_readq(void __iomem *addr)
77{
78 u32 lo, hi;
79 lo = readl(addr);
80 hi = readl(addr + 4);
81 return (((u64) hi) << 32) + lo;
82}
83
84static inline void dmar_writeq(void __iomem *addr, u64 val)
85{
86 writel((u32)val, addr);
87 writel((u32)(val >> 32), addr + 4);
88}
89
90#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
91#define DMAR_VER_MINOR(v) ((v) & 0x0f)
92
93/*
94 * Decoding Capability Register
95 */
96#define cap_read_drain(c) (((c) >> 55) & 1)
97#define cap_write_drain(c) (((c) >> 54) & 1)
98#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
99#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
100#define cap_pgsel_inv(c) (((c) >> 39) & 1)
101
102#define cap_super_page_val(c) (((c) >> 34) & 0xf)
103#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
104 * OFFSET_STRIDE) + 21)
105
106#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
107#define cap_max_fault_reg_offset(c) \
108 (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
109
110#define cap_zlr(c) (((c) >> 22) & 1)
111#define cap_isoch(c) (((c) >> 23) & 1)
112#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
113#define cap_sagaw(c) (((c) >> 8) & 0x1f)
114#define cap_caching_mode(c) (((c) >> 7) & 1)
115#define cap_phmr(c) (((c) >> 6) & 1)
116#define cap_plmr(c) (((c) >> 5) & 1)
117#define cap_rwbf(c) (((c) >> 4) & 1)
118#define cap_afl(c) (((c) >> 3) & 1)
119#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
120/*
121 * Extended Capability Register
122 */
123
124#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
125#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
126#define ecap_max_iotlb_offset(e) \
127 (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
128#define ecap_coherent(e) ((e) & 0x1)
129
130
131/* IOTLB_REG */
132#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
133#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
134#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
135#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
136#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
137#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
138#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
139#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
140#define DMA_TLB_IVT (((u64)1) << 63)
141#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
142#define DMA_TLB_MAX_SIZE (0x3f)
143
144/* PMEN_REG */
145#define DMA_PMEN_EPM (((u32)1)<<31)
146#define DMA_PMEN_PRS (((u32)1)<<0)
147
148/* GCMD_REG */
149#define DMA_GCMD_TE (((u32)1) << 31)
150#define DMA_GCMD_SRTP (((u32)1) << 30)
151#define DMA_GCMD_SFL (((u32)1) << 29)
152#define DMA_GCMD_EAFL (((u32)1) << 28)
153#define DMA_GCMD_WBF (((u32)1) << 27)
154
155/* GSTS_REG */
156#define DMA_GSTS_TES (((u32)1) << 31)
157#define DMA_GSTS_RTPS (((u32)1) << 30)
158#define DMA_GSTS_FLS (((u32)1) << 29)
159#define DMA_GSTS_AFLS (((u32)1) << 28)
160#define DMA_GSTS_WBFS (((u32)1) << 27)
161
162/* CCMD_REG */
163#define DMA_CCMD_ICC (((u64)1) << 63)
164#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
165#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
166#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
167#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
168#define DMA_CCMD_MASK_NOBIT 0
169#define DMA_CCMD_MASK_1BIT 1
170#define DMA_CCMD_MASK_2BIT 2
171#define DMA_CCMD_MASK_3BIT 3
172#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
173#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
174
175/* FECTL_REG */
176#define DMA_FECTL_IM (((u32)1) << 31)
177
178/* FSTS_REG */
179#define DMA_FSTS_PPF ((u32)2)
180#define DMA_FSTS_PFO ((u32)1)
181#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
182
183/* FRCD_REG, 32 bits access */
184#define DMA_FRCD_F (((u32)1) << 31)
185#define dma_frcd_type(d) ((d >> 30) & 1)
186#define dma_frcd_fault_reason(c) (c & 0xff)
187#define dma_frcd_source_id(c) (c & 0xffff)
188#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
189
190/*
191 * 0: Present
192 * 1-11: Reserved
193 * 12-63: Context Ptr (12 - (haw-1))
194 * 64-127: Reserved
195 */
196struct root_entry {
197 u64 val;
198 u64 rsvd1;
199};
200#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
201static inline bool root_present(struct root_entry *root)
202{
203 return (root->val & 1);
204}
205static inline void set_root_present(struct root_entry *root)
206{
207 root->val |= 1;
208}
209static inline void set_root_value(struct root_entry *root, unsigned long value)
210{
211 root->val |= value & PAGE_MASK_4K;
212}
213
214struct context_entry;
215static inline struct context_entry *
216get_context_addr_from_root(struct root_entry *root)
217{
218 return (struct context_entry *)
219 (root_present(root)?phys_to_virt(
220 root->val & PAGE_MASK_4K):
221 NULL);
222}
223
224/*
225 * low 64 bits:
226 * 0: present
227 * 1: fault processing disable
228 * 2-3: translation type
229 * 12-63: address space root
230 * high 64 bits:
231 * 0-2: address width
232 * 3-6: aval
233 * 8-23: domain id
234 */
235struct context_entry {
236 u64 lo;
237 u64 hi;
238};
239#define context_present(c) ((c).lo & 1)
240#define context_fault_disable(c) (((c).lo >> 1) & 1)
241#define context_translation_type(c) (((c).lo >> 2) & 3)
242#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
243#define context_address_width(c) ((c).hi & 7)
244#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
245
246#define context_set_present(c) do {(c).lo |= 1;} while (0)
247#define context_set_fault_enable(c) \
248 do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
249#define context_set_translation_type(c, val) \
250 do { \
251 (c).lo &= (((u64)-1) << 4) | 3; \
252 (c).lo |= ((val) & 3) << 2; \
253 } while (0)
254#define CONTEXT_TT_MULTI_LEVEL 0
255#define context_set_address_root(c, val) \
256 do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
257#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
258#define context_set_domain_id(c, val) \
259 do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
260#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
261
262/*
263 * 0: readable
264 * 1: writable
265 * 2-6: reserved
266 * 7: super page
267 * 8-11: available
268 * 12-63: Host physcial address
269 */
270struct dma_pte {
271 u64 val;
272};
273#define dma_clear_pte(p) do {(p).val = 0;} while (0)
274
275#define DMA_PTE_READ (1)
276#define DMA_PTE_WRITE (2)
277
278#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
279#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
280#define dma_set_pte_prot(p, prot) \
281 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
282#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
283#define dma_set_pte_addr(p, addr) do {\
284 (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
285#define dma_pte_present(p) (((p).val & 3) != 0)
286
287struct intel_iommu;
288
289struct dmar_domain {
290 int id; /* domain id */
291 struct intel_iommu *iommu; /* back pointer to owning iommu */
292
293 struct list_head devices; /* all devices' list */
294 struct iova_domain iovad; /* iova's that belong to this domain */
295
296 struct dma_pte *pgd; /* virtual address */
297 spinlock_t mapping_lock; /* page table lock */
298 int gaw; /* max guest address width */
299
300 /* adjusted guest address width, 0 is level 2 30-bit */
301 int agaw;
302
303#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
304 int flags;
305};
306
307/* PCI domain-device relationship */
308struct device_domain_info {
309 struct list_head link; /* link to domain siblings */
310 struct list_head global; /* link to global list */
311 u8 bus; /* PCI bus numer */
312 u8 devfn; /* PCI devfn number */
313 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
314 struct dmar_domain *domain; /* pointer to domain */
315};
316
317extern int init_dmars(void);
318
319struct intel_iommu {
320 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
321 u64 cap;
322 u64 ecap;
323 unsigned long *domain_ids; /* bitmap of domains */
324 struct dmar_domain **domains; /* ptr to domains */
325 int seg;
326 u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
327 spinlock_t lock; /* protect context, domain ids */
328 spinlock_t register_lock; /* protect register handling */
329 struct root_entry *root_entry; /* virtual address */
330
331 unsigned int irq;
332 unsigned char name[7]; /* Device Name */
333 struct msi_msg saved_msg;
334 struct sys_device sysdev;
335};
336
337#ifndef CONFIG_DMAR_GFX_WA
338static inline void iommu_prepare_gfx_mapping(void)
339{
340 return;
341}
342#endif /* !CONFIG_DMAR_GFX_WA */
343
344#endif
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
new file mode 100644
index 000000000000..2de5a3238c94
--- /dev/null
+++ b/drivers/pci/intr_remapping.c
@@ -0,0 +1,512 @@
1#include <linux/interrupt.h>
2#include <linux/dmar.h>
3#include <linux/spinlock.h>
4#include <linux/jiffies.h>
5#include <linux/pci.h>
6#include <linux/irq.h>
7#include <asm/io_apic.h>
8#include <linux/intel-iommu.h>
9#include "intr_remapping.h"
10
11static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
12static int ir_ioapic_num;
13int intr_remapping_enabled;
14
15struct irq_2_iommu {
16 struct intel_iommu *iommu;
17 u16 irte_index;
18 u16 sub_handle;
19 u8 irte_mask;
20};
21
22static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
23
/* Map an irq number to its bookkeeping slot, or NULL if out of range. */
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
}
28
/*
 * "Allocate" the bookkeeping slot for @irq.  The table is statically
 * sized (NR_IRQS), so this is just a lookup; kept as a separate
 * function so a dynamic allocator can be dropped in later.
 */
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
33
34static DEFINE_SPINLOCK(irq_2_ir_lock);
35
36static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
37{
38 struct irq_2_iommu *irq_iommu;
39
40 irq_iommu = irq_2_iommu(irq);
41
42 if (!irq_iommu)
43 return NULL;
44
45 if (!irq_iommu->iommu)
46 return NULL;
47
48 return irq_iommu;
49}
50
/* Nonzero if @irq currently has an interrupt-remapping entry assigned. */
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}
55
/*
 * get_irte - copy the remap table entry currently programmed for @irq
 * into *@entry.
 *
 * Returns 0 on success, -1 if @entry is NULL or the irq has no valid
 * remapping entry.
 */
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	if (!entry)
		return -1;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	/* irte_index is the base of the irq's range; sub_handle selects
	 * the entry within it */
	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}
77
/*
 * alloc_irte - allocate @count consecutive interrupt remap table
 * entries on @iommu and bind them to @irq.
 *
 * For count > 1 the request is rounded up to a power of two and stored
 * as a mask (log2 of the count), matching the hardware's
 * invalidation-handle mask format.
 *
 * Returns the base index of the allocated range, or -1 on failure
 * (zero count, irq out of range, mask exceeding the hardware limit, or
 * no free run of entries in the table).
 */
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		/* look for a run of @count not-present entries at index */
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		/* advance by a full run; wraps at the end of the table */
		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		/* searched the whole table without success */
		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	/* reserve the range and record the binding for this irq */
	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}
142
/*
 * Queue and wait for an interrupt-entry-cache invalidation of the
 * entries at @index (with @mask low index bits wildcarded) on @iommu.
 */
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	/* synchronous: returns only after the hardware has completed it */
	qi_submit_sync(&desc, iommu);
}
153
/*
 * map_irq_to_irte_handle - look up the remap table binding for @irq.
 *
 * Stores the irq's sub_handle in *@sub_handle and returns the base
 * index of its entry range, or -1 if the irq has no valid binding
 * (*@sub_handle is left untouched in that case).
 */
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}
171
/*
 * set_irte_irq - bind @irq to entry @index/@subhandle of @iommu's
 * remap table (used when several irqs share one allocated range,
 * e.g. MSI-X vectors).
 *
 * Always returns 0.
 */
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);

	irq_iommu = irq_2_iommu_alloc(irq);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	/* mask 0: this irq does not own the range, so it never frees it */
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}
189
190int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
191{
192 struct irq_2_iommu *irq_iommu;
193
194 spin_lock(&irq_2_ir_lock);
195 irq_iommu = valid_irq_2_iommu(irq);
196 if (!irq_iommu) {
197 spin_unlock(&irq_2_ir_lock);
198 return -1;
199 }
200
201 irq_iommu->iommu = NULL;
202 irq_iommu->irte_index = 0;
203 irq_iommu->sub_handle = 0;
204 irq_2_iommu(irq)->irte_mask = 0;
205
206 spin_unlock(&irq_2_ir_lock);
207
208 return 0;
209}
210
/*
 * modify_irte - rewrite the remap table entry for @irq from
 * *@irte_modified, flush it to memory and invalidate the hardware's
 * cached copy.
 *
 * Returns 0 on success, -1 if the irq has no valid binding.
 */
int modify_irte(int irq, struct irte *irte_modified)
{
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	/*
	 * Atomically store the low 64 bits with bit 1 forced on.
	 * NOTE(review): per the IRTE layout bit 0 is Present and bit 1 is
	 * FPD — confirm forcing bit 1 (rather than bit 0) is intended, and
	 * that the high 64 bits of *irte_modified are deliberately ignored.
	 */
	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	/* drop any stale cached copy of this single entry */
	qi_flush_iec(iommu, index, 0);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}
238
/*
 * flush_irte - invalidate the hardware's cached copy of @irq's remap
 * table entries (the whole allocated range, per irte_mask).
 *
 * Returns 0 on success, -1 if the irq has no valid binding.
 */
int flush_irte(int irq)
{
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return 0;
}
261
262struct intel_iommu *map_ioapic_to_ir(int apic)
263{
264 int i;
265
266 for (i = 0; i < MAX_IO_APICS; i++)
267 if (ir_ioapic[i].id == apic)
268 return ir_ioapic[i].iommu;
269 return NULL;
270}
271
272struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
273{
274 struct dmar_drhd_unit *drhd;
275
276 drhd = dmar_find_matched_drhd_unit(dev);
277 if (!drhd)
278 return NULL;
279
280 return drhd->iommu;
281}
282
/*
 * free_irte - release @irq's remap table entries and binding.
 *
 * Only the range owner (sub_handle == 0) actually clears the table
 * entries and invalidates the hardware cache; sharers just drop their
 * bookkeeping.
 *
 * Returns 0 on success, -1 if the irq had no valid binding.
 */
int free_irte(int irq)
{
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		/*
		 * NOTE(review): the loop stores to the same slot (*irte)
		 * (1 << irte_mask) times instead of advancing through the
		 * range — verify whether irte + i was intended.
		 */
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)irte, 0);
		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}
317
/*
 * Program @iommu with the location of its interrupt-remapping table
 * and turn remapping on.
 *
 * Sequence: write IRTA (table address, x2apic mode bit, table size),
 * issue SIRTP and wait for IRTPS; globally invalidate the interrupt
 * entry cache; then set IRE and wait for IRES.  Register accesses are
 * done under register_lock, but the QI submission in qi_global_iec()
 * must happen outside it.
 */
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
357
358
359static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
360{
361 struct ir_table *ir_table;
362 struct page *pages;
363
364 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
365 GFP_KERNEL);
366
367 if (!iommu->ir_table)
368 return -ENOMEM;
369
370 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
371
372 if (!pages) {
373 printk(KERN_ERR "failed to allocate pages of order %d\n",
374 INTR_REMAP_PAGE_ORDER);
375 kfree(iommu->ir_table);
376 return -ENOMEM;
377 }
378
379 ir_table->base = page_address(pages);
380
381 iommu_set_intr_remapping(iommu, mode);
382 return 0;
383}
384
/*
 * enable_intr_remapping - enable interrupt remapping on every capable
 * DRHD unit.
 * @eim: nonzero to request x2apic (extended interrupt) mode.
 *
 * Three passes over the DRHD units: verify EIM support when requested,
 * enable queued invalidation everywhere (required for IEC flushes),
 * then allocate and enable a remapping table on each IR-capable unit.
 *
 * Returns 0 on success, -1 on any failure.
 */
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	/* fail if no unit actually got remapping enabled */
	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
450
/*
 * ir_parse_ioapic_scope - walk the device-scope entries of one DRHD
 * (@header) and record every IO-APIC it covers as handled by @iommu
 * in the global ir_ioapic[] table.
 *
 * Returns 0 on success, -1 if more than MAX_IO_APICS IO-APICs are seen.
 */
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	/* scope entries are packed immediately after the DRHD header */
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		/* entries are variable length */
		start += scope->length;
	}

	return 0;
}
484
/*
 * Finds the association between IOAPIC's and its Interrupt-remapping
 * hardware unit.
 *
 * Returns 1 if at least one unit supports interrupt remapping and all
 * IO-APICs are covered, 0 if none supports it, -1 on a parse error or
 * if some IO-APIC is not listed under any remapping unit (in which
 * case remapping must not be enabled).
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
new file mode 100644
index 000000000000..ca48f0df8ac9
--- /dev/null
+++ b/drivers/pci/intr_remapping.h
@@ -0,0 +1,8 @@
1#include <linux/intel-iommu.h>
2
3struct ioapic_scope {
4 struct intel_iommu *iommu;
5 unsigned int id;
6};
7
8#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 3ef4ac064315..2287116e9822 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -7,7 +7,7 @@
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 */ 8 */
9 9
10#include "iova.h" 10#include <linux/iova.h>
11 11
12void 12void
13init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) 13init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
deleted file mode 100644
index 228f6c94b69c..000000000000
--- a/drivers/pci/iova.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This file is released under the GPLv2.
5 *
6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 *
9 */
10
11#ifndef _IOVA_H_
12#define _IOVA_H_
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/rbtree.h>
17#include <linux/dma-mapping.h>
18
19/* IO virtual address start page frame number */
20#define IOVA_START_PFN (1)
21
22/* iova structure */
23struct iova {
24 struct rb_node node;
25 unsigned long pfn_hi; /* IOMMU dish out addr hi */
26 unsigned long pfn_lo; /* IOMMU dish out addr lo */
27};
28
29/* holds all the iova translations for a domain */
30struct iova_domain {
31 spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
32 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
33 struct rb_root rbroot; /* iova domain rbtree root */
34 struct rb_node *cached32_node; /* Save last alloced node */
35 unsigned long dma_32bit_pfn;
36};
37
38struct iova *alloc_iova_mem(void);
39void free_iova_mem(struct iova *iova);
40void free_iova(struct iova_domain *iovad, unsigned long pfn);
41void __free_iova(struct iova_domain *iovad, struct iova *iova);
42struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
43 unsigned long limit_pfn,
44 bool size_aligned);
45struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
46 unsigned long pfn_hi);
47void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
48void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
49struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
50void put_iova_domain(struct iova_domain *iovad);
51
52#endif
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
new file mode 100644
index 000000000000..6441dfa969a3
--- /dev/null
+++ b/drivers/pci/irq.c
@@ -0,0 +1,60 @@
1/*
2 * PCI IRQ failure handing code
3 *
4 * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 */
6
7#include <linux/acpi.h>
8#include <linux/device.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11
/*
 * Log a standardized misrouted-IRQ report for @pdev, naming its parent
 * bridge and the suspected @reason, then WARN so the trace lands in
 * bug reports.  Assumes @pdev has a PCI parent (dev.parent is a
 * pci_dev) — not valid for root-bus devices.
 */
static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
{
	struct pci_dev *parent = to_pci_dev(pdev->dev.parent);

	dev_printk(KERN_ERR, &pdev->dev,
		   "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
		   parent->dev.bus_id, parent->vendor, parent->device);
	dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
	dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
	WARN_ON(1);
}
23
24/**
25 * pci_lost_interrupt - reports a lost PCI interrupt
26 * @pdev: device whose interrupt is lost
27 *
28 * The primary function of this routine is to report a lost interrupt
29 * in a standard way which users can recognise (instead of blaming the
30 * driver).
31 *
32 * Returns:
33 * a suggestion for fixing it (although the driver is not required to
34 * act on this).
35 */
36enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev)
37{
38 if (pdev->msi_enabled || pdev->msix_enabled) {
39 enum pci_lost_interrupt_reason ret;
40
41 if (pdev->msix_enabled) {
42 pci_note_irq_problem(pdev, "MSIX routing failure");
43 ret = PCI_LOST_IRQ_DISABLE_MSIX;
44 } else {
45 pci_note_irq_problem(pdev, "MSI routing failure");
46 ret = PCI_LOST_IRQ_DISABLE_MSI;
47 }
48 return ret;
49 }
50#ifdef CONFIG_ACPI
51 if (!(acpi_disabled || acpi_noirq)) {
52 pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq");
53 /* currently no way to fix acpi on the fly */
54 return PCI_LOST_IRQ_DISABLE_ACPI;
55 }
56#endif
57 pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)");
58 return PCI_LOST_IRQ_NO_INFORMATION;
59}
60EXPORT_SYMBOL(pci_lost_interrupt);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 15af618d36e2..74801f7df9c9 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -126,7 +126,16 @@ static void msix_flush_writes(unsigned int irq)
126 } 126 }
127} 127}
128 128
129static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) 129/*
130 * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
131 * mask all MSI interrupts by clearing the MSI enable bit does not work
132 * reliably as devices without an INTx disable bit will then generate a
133 * level IRQ which will never be cleared.
134 *
135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
136 * doesn't support MSI masking.
137 */
138static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
130{ 139{
131 struct msi_desc *entry; 140 struct msi_desc *entry;
132 141
@@ -144,8 +153,7 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
144 mask_bits |= flag & mask; 153 mask_bits |= flag & mask;
145 pci_write_config_dword(entry->dev, pos, mask_bits); 154 pci_write_config_dword(entry->dev, pos, mask_bits);
146 } else { 155 } else {
147 __msi_set_enable(entry->dev, entry->msi_attrib.pos, 156 return 0;
148 !flag);
149 } 157 }
150 break; 158 break;
151 case PCI_CAP_ID_MSIX: 159 case PCI_CAP_ID_MSIX:
@@ -161,6 +169,7 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
161 break; 169 break;
162 } 170 }
163 entry->msi_attrib.masked = !!flag; 171 entry->msi_attrib.masked = !!flag;
172 return 1;
164} 173}
165 174
166void read_msi_msg(unsigned int irq, struct msi_msg *msg) 175void read_msi_msg(unsigned int irq, struct msi_msg *msg)
@@ -299,9 +308,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
299 entry->msi_attrib.masked); 308 entry->msi_attrib.masked);
300 309
301 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 310 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
302 control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); 311 control &= ~PCI_MSI_FLAGS_QSIZE;
303 if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked) 312 control |= PCI_MSI_FLAGS_ENABLE;
304 control |= PCI_MSI_FLAGS_ENABLE;
305 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 313 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
306} 314}
307 315
@@ -370,23 +378,21 @@ static int msi_capability_init(struct pci_dev *dev)
370 entry->msi_attrib.masked = 1; 378 entry->msi_attrib.masked = 1;
371 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 379 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
372 entry->msi_attrib.pos = pos; 380 entry->msi_attrib.pos = pos;
373 if (is_mask_bit_support(control)) { 381 if (entry->msi_attrib.maskbit) {
374 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 382 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
375 is_64bit_address(control)); 383 entry->msi_attrib.is_64);
376 } 384 }
377 entry->dev = dev; 385 entry->dev = dev;
378 if (entry->msi_attrib.maskbit) { 386 if (entry->msi_attrib.maskbit) {
379 unsigned int maskbits, temp; 387 unsigned int maskbits, temp;
380 /* All MSIs are unmasked by default, Mask them all */ 388 /* All MSIs are unmasked by default, Mask them all */
381 pci_read_config_dword(dev, 389 pci_read_config_dword(dev,
382 msi_mask_bits_reg(pos, is_64bit_address(control)), 390 msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
383 &maskbits); 391 &maskbits);
384 temp = (1 << multi_msi_capable(control)); 392 temp = (1 << multi_msi_capable(control));
385 temp = ((temp - 1) & ~temp); 393 temp = ((temp - 1) & ~temp);
386 maskbits |= temp; 394 maskbits |= temp;
387 pci_write_config_dword(dev, 395 pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits);
388 msi_mask_bits_reg(pos, is_64bit_address(control)),
389 maskbits);
390 entry->msi_attrib.maskbits_mask = temp; 396 entry->msi_attrib.maskbits_mask = temp;
391 } 397 }
392 list_add_tail(&entry->list, &dev->msi_list); 398 list_add_tail(&entry->list, &dev->msi_list);
@@ -753,3 +759,24 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
753{ 759{
754 INIT_LIST_HEAD(&dev->msi_list); 760 INIT_LIST_HEAD(&dev->msi_list);
755} 761}
762
763#ifdef CONFIG_ACPI
764#include <linux/acpi.h>
765#include <linux/pci-acpi.h>
766static void __devinit msi_acpi_init(void)
767{
768 if (acpi_pci_disabled)
769 return;
770 pci_osc_support_set(OSC_MSI_SUPPORT);
771 pcie_osc_support_set(OSC_MSI_SUPPORT);
772}
773#else
774static inline void msi_acpi_init(void) { }
775#endif /* CONFIG_ACPI */
776
777void __devinit msi_init(void)
778{
779 if (!pci_msi_enable)
780 return;
781 msi_acpi_init();
782}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7764768b6a0e..dfe7c8e1b185 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pci-aspm.h>
14#include <acpi/acpi.h> 15#include <acpi/acpi.h>
15#include <acpi/acnamesp.h> 16#include <acpi/acnamesp.h>
16#include <acpi/acresrc.h> 17#include <acpi/acresrc.h>
@@ -23,17 +24,17 @@ struct acpi_osc_data {
23 acpi_handle handle; 24 acpi_handle handle;
24 u32 support_set; 25 u32 support_set;
25 u32 control_set; 26 u32 control_set;
26 int is_queried;
27 u32 query_result;
28 struct list_head sibiling; 27 struct list_head sibiling;
29}; 28};
30static LIST_HEAD(acpi_osc_data_list); 29static LIST_HEAD(acpi_osc_data_list);
31 30
32struct acpi_osc_args { 31struct acpi_osc_args {
33 u32 capbuf[3]; 32 u32 capbuf[3];
34 u32 query_result; 33 u32 ctrl_result;
35}; 34};
36 35
36static DEFINE_MUTEX(pci_acpi_lock);
37
37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) 38static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
38{ 39{
39 struct acpi_osc_data *data; 40 struct acpi_osc_data *data;
@@ -107,9 +108,8 @@ static acpi_status acpi_run_osc(acpi_handle handle,
107 goto out_kfree; 108 goto out_kfree;
108 } 109 }
109out_success: 110out_success:
110 if (flags & OSC_QUERY_ENABLE) 111 osc_args->ctrl_result =
111 osc_args->query_result = 112 *((u32 *)(out_obj->buffer.pointer + 8));
112 *((u32 *)(out_obj->buffer.pointer + 8));
113 status = AE_OK; 113 status = AE_OK;
114 114
115out_kfree: 115out_kfree:
@@ -117,41 +117,53 @@ out_kfree:
117 return status; 117 return status;
118} 118}
119 119
120static acpi_status acpi_query_osc(acpi_handle handle, 120static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
121 u32 level, void *context, void **retval) 121 u32 *result)
122{ 122{
123 acpi_status status; 123 acpi_status status;
124 struct acpi_osc_data *osc_data; 124 u32 support_set;
125 u32 flags = (unsigned long)context, support_set;
126 acpi_handle tmp;
127 struct acpi_osc_args osc_args; 125 struct acpi_osc_args osc_args;
128 126
129 status = acpi_get_handle(handle, "_OSC", &tmp);
130 if (ACPI_FAILURE(status))
131 return status;
132
133 osc_data = acpi_get_osc_data(handle);
134 if (!osc_data) {
135 printk(KERN_ERR "acpi osc data array is full\n");
136 return AE_ERROR;
137 }
138
139 /* do _OSC query for all possible controls */ 127 /* do _OSC query for all possible controls */
140 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS); 128 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
141 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 129 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
142 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; 130 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
143 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; 131 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
144 132
145 status = acpi_run_osc(handle, &osc_args); 133 status = acpi_run_osc(osc_data->handle, &osc_args);
146 if (ACPI_SUCCESS(status)) { 134 if (ACPI_SUCCESS(status)) {
147 osc_data->support_set = support_set; 135 osc_data->support_set = support_set;
148 osc_data->query_result = osc_args.query_result; 136 *result = osc_args.ctrl_result;
149 osc_data->is_queried = 1;
150 } 137 }
151 138
152 return status; 139 return status;
153} 140}
154 141
142static acpi_status acpi_query_osc(acpi_handle handle,
143 u32 level, void *context, void **retval)
144{
145 acpi_status status;
146 struct acpi_osc_data *osc_data;
147 u32 flags = (unsigned long)context, dummy;
148 acpi_handle tmp;
149
150 status = acpi_get_handle(handle, "_OSC", &tmp);
151 if (ACPI_FAILURE(status))
152 return AE_OK;
153
154 mutex_lock(&pci_acpi_lock);
155 osc_data = acpi_get_osc_data(handle);
156 if (!osc_data) {
157 printk(KERN_ERR "acpi osc data array is full\n");
158 goto out;
159 }
160
161 __acpi_query_osc(flags, osc_data, &dummy);
162out:
163 mutex_unlock(&pci_acpi_lock);
164 return AE_OK;
165}
166
155/** 167/**
156 * __pci_osc_support_set - register OS support to Firmware 168 * __pci_osc_support_set - register OS support to Firmware
157 * @flags: OS support bits 169 * @flags: OS support bits
@@ -180,7 +192,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
180acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 192acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
181{ 193{
182 acpi_status status; 194 acpi_status status;
183 u32 ctrlset, control_set; 195 u32 ctrlset, control_set, result;
184 acpi_handle tmp; 196 acpi_handle tmp;
185 struct acpi_osc_data *osc_data; 197 struct acpi_osc_data *osc_data;
186 struct acpi_osc_args osc_args; 198 struct acpi_osc_args osc_args;
@@ -189,19 +201,28 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
189 if (ACPI_FAILURE(status)) 201 if (ACPI_FAILURE(status))
190 return status; 202 return status;
191 203
204 mutex_lock(&pci_acpi_lock);
192 osc_data = acpi_get_osc_data(handle); 205 osc_data = acpi_get_osc_data(handle);
193 if (!osc_data) { 206 if (!osc_data) {
194 printk(KERN_ERR "acpi osc data array is full\n"); 207 printk(KERN_ERR "acpi osc data array is full\n");
195 return AE_ERROR; 208 status = AE_ERROR;
209 goto out;
196 } 210 }
197 211
198 ctrlset = (flags & OSC_CONTROL_MASKS); 212 ctrlset = (flags & OSC_CONTROL_MASKS);
199 if (!ctrlset) 213 if (!ctrlset) {
200 return AE_TYPE; 214 status = AE_TYPE;
215 goto out;
216 }
201 217
202 if (osc_data->is_queried && 218 status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
203 ((osc_data->query_result & ctrlset) != ctrlset)) 219 if (ACPI_FAILURE(status))
204 return AE_SUPPORT; 220 goto out;
221
222 if ((result & ctrlset) != ctrlset) {
223 status = AE_SUPPORT;
224 goto out;
225 }
205 226
206 control_set = osc_data->control_set | ctrlset; 227 control_set = osc_data->control_set | ctrlset;
207 osc_args.capbuf[OSC_QUERY_TYPE] = 0; 228 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
@@ -210,7 +231,8 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
210 status = acpi_run_osc(handle, &osc_args); 231 status = acpi_run_osc(handle, &osc_args);
211 if (ACPI_SUCCESS(status)) 232 if (ACPI_SUCCESS(status))
212 osc_data->control_set = control_set; 233 osc_data->control_set = control_set;
213 234out:
235 mutex_unlock(&pci_acpi_lock);
214 return status; 236 return status;
215} 237}
216EXPORT_SYMBOL(pci_osc_control_set); 238EXPORT_SYMBOL(pci_osc_control_set);
@@ -372,6 +394,12 @@ static int __init acpi_pci_init(void)
372 printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); 394 printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n");
373 pci_no_msi(); 395 pci_no_msi();
374 } 396 }
397
398 if (acpi_gbl_FADT.boot_flags & BAF_PCIE_ASPM_CONTROL) {
399 printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
400 pcie_no_aspm();
401 }
402
375 ret = register_acpi_bus_type(&acpi_pci_bus); 403 ret = register_acpi_bus_type(&acpi_pci_bus);
376 if (ret) 404 if (ret)
377 return 0; 405 return 0;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a13f53486114..b4cdd690ae71 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -43,18 +43,32 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
43{ 43{
44 struct pci_dynid *dynid; 44 struct pci_dynid *dynid;
45 struct pci_driver *pdrv = to_pci_driver(driver); 45 struct pci_driver *pdrv = to_pci_driver(driver);
46 const struct pci_device_id *ids = pdrv->id_table;
46 __u32 vendor, device, subvendor=PCI_ANY_ID, 47 __u32 vendor, device, subvendor=PCI_ANY_ID,
47 subdevice=PCI_ANY_ID, class=0, class_mask=0; 48 subdevice=PCI_ANY_ID, class=0, class_mask=0;
48 unsigned long driver_data=0; 49 unsigned long driver_data=0;
49 int fields=0; 50 int fields=0;
50 int retval = 0; 51 int retval;
51 52
52 fields = sscanf(buf, "%x %x %x %x %x %x %lux", 53 fields = sscanf(buf, "%x %x %x %x %x %x %lx",
53 &vendor, &device, &subvendor, &subdevice, 54 &vendor, &device, &subvendor, &subdevice,
54 &class, &class_mask, &driver_data); 55 &class, &class_mask, &driver_data);
55 if (fields < 2) 56 if (fields < 2)
56 return -EINVAL; 57 return -EINVAL;
57 58
59 /* Only accept driver_data values that match an existing id_table
60 entry */
61 retval = -EINVAL;
62 while (ids->vendor || ids->subvendor || ids->class_mask) {
63 if (driver_data == ids->driver_data) {
64 retval = 0;
65 break;
66 }
67 ids++;
68 }
69 if (retval) /* No match */
70 return retval;
71
58 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 72 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
59 if (!dynid) 73 if (!dynid)
60 return -ENOMEM; 74 return -ENOMEM;
@@ -65,8 +79,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
65 dynid->id.subdevice = subdevice; 79 dynid->id.subdevice = subdevice;
66 dynid->id.class = class; 80 dynid->id.class = class;
67 dynid->id.class_mask = class_mask; 81 dynid->id.class_mask = class_mask;
68 dynid->id.driver_data = pdrv->dynids.use_driver_data ? 82 dynid->id.driver_data = driver_data;
69 driver_data : 0UL;
70 83
71 spin_lock(&pdrv->dynids.lock); 84 spin_lock(&pdrv->dynids.lock);
72 list_add_tail(&dynid->node, &pdrv->dynids.list); 85 list_add_tail(&dynid->node, &pdrv->dynids.list);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9c718583a237..110022d78689 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -16,6 +16,7 @@
16 16
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/sched.h>
19#include <linux/pci.h> 20#include <linux/pci.h>
20#include <linux/stat.h> 21#include <linux/stat.h>
21#include <linux/topology.h> 22#include <linux/topology.h>
@@ -422,7 +423,7 @@ pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr,
422 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific 423 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
423 * callback routine (pci_legacy_read). 424 * callback routine (pci_legacy_read).
424 */ 425 */
425ssize_t 426static ssize_t
426pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, 427pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
427 char *buf, loff_t off, size_t count) 428 char *buf, loff_t off, size_t count)
428{ 429{
@@ -447,7 +448,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
447 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific 448 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
448 * callback routine (pci_legacy_write). 449 * callback routine (pci_legacy_write).
449 */ 450 */
450ssize_t 451static ssize_t
451pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, 452pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
452 char *buf, loff_t off, size_t count) 453 char *buf, loff_t off, size_t count)
453{ 454{
@@ -467,11 +468,11 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
467 * @attr: struct bin_attribute for this file 468 * @attr: struct bin_attribute for this file
468 * @vma: struct vm_area_struct passed to mmap 469 * @vma: struct vm_area_struct passed to mmap
469 * 470 *
470 * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap 471 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
471 * legacy memory space (first meg of bus space) into application virtual 472 * legacy memory space (first meg of bus space) into application virtual
472 * memory space. 473 * memory space.
473 */ 474 */
474int 475static int
475pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, 476pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
476 struct vm_area_struct *vma) 477 struct vm_area_struct *vma)
477{ 478{
@@ -479,11 +480,109 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
479 struct device, 480 struct device,
480 kobj)); 481 kobj));
481 482
482 return pci_mmap_legacy_page_range(bus, vma); 483 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
484}
485
486/**
487 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
488 * @kobj: kobject corresponding to device to be mapped
489 * @attr: struct bin_attribute for this file
490 * @vma: struct vm_area_struct passed to mmap
491 *
492 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
493 * legacy IO space (first meg of bus space) into application virtual
494 * memory space. Returns -ENOSYS if the operation isn't supported
495 */
496static int
497pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
498 struct vm_area_struct *vma)
499{
500 struct pci_bus *bus = to_pci_bus(container_of(kobj,
501 struct device,
502 kobj));
503
504 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
505}
506
507/**
508 * pci_create_legacy_files - create legacy I/O port and memory files
509 * @b: bus to create files under
510 *
511 * Some platforms allow access to legacy I/O port and ISA memory space on
512 * a per-bus basis. This routine creates the files and ties them into
513 * their associated read, write and mmap files from pci-sysfs.c
514 *
 515 * On error unwind, but don't propagate the error to the caller
516 * as it is ok to set up the PCI bus without these files.
517 */
518void pci_create_legacy_files(struct pci_bus *b)
519{
520 int error;
521
522 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
523 GFP_ATOMIC);
524 if (!b->legacy_io)
525 goto kzalloc_err;
526
527 b->legacy_io->attr.name = "legacy_io";
528 b->legacy_io->size = 0xffff;
529 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
530 b->legacy_io->read = pci_read_legacy_io;
531 b->legacy_io->write = pci_write_legacy_io;
532 b->legacy_io->mmap = pci_mmap_legacy_io;
533 error = device_create_bin_file(&b->dev, b->legacy_io);
534 if (error)
535 goto legacy_io_err;
536
537 /* Allocated above after the legacy_io struct */
538 b->legacy_mem = b->legacy_io + 1;
539 b->legacy_mem->attr.name = "legacy_mem";
540 b->legacy_mem->size = 1024*1024;
541 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
542 b->legacy_mem->mmap = pci_mmap_legacy_mem;
543 error = device_create_bin_file(&b->dev, b->legacy_mem);
544 if (error)
545 goto legacy_mem_err;
546
547 return;
548
549legacy_mem_err:
550 device_remove_bin_file(&b->dev, b->legacy_io);
551legacy_io_err:
552 kfree(b->legacy_io);
553 b->legacy_io = NULL;
554kzalloc_err:
555 printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
556 "and ISA memory resources to sysfs\n");
557 return;
558}
559
560void pci_remove_legacy_files(struct pci_bus *b)
561{
562 if (b->legacy_io) {
563 device_remove_bin_file(&b->dev, b->legacy_io);
564 device_remove_bin_file(&b->dev, b->legacy_mem);
565 kfree(b->legacy_io); /* both are allocated here */
566 }
483} 567}
484#endif /* HAVE_PCI_LEGACY */ 568#endif /* HAVE_PCI_LEGACY */
485 569
486#ifdef HAVE_PCI_MMAP 570#ifdef HAVE_PCI_MMAP
571
572static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
573{
574 unsigned long nr, start, size;
575
576 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
577 start = vma->vm_pgoff;
578 size = pci_resource_len(pdev, resno) >> PAGE_SHIFT;
579 if (start < size && size - start >= nr)
580 return 1;
581 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
582 current->comm, start, start+nr, pci_name(pdev), resno, size);
583 return 0;
584}
585
487/** 586/**
488 * pci_mmap_resource - map a PCI resource into user memory space 587 * pci_mmap_resource - map a PCI resource into user memory space
489 * @kobj: kobject for mapping 588 * @kobj: kobject for mapping
@@ -510,6 +609,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
510 if (i >= PCI_ROM_RESOURCE) 609 if (i >= PCI_ROM_RESOURCE)
511 return -ENODEV; 610 return -ENODEV;
512 611
612 if (!pci_mmap_fits(pdev, i, vma))
613 return -EINVAL;
614
513 /* pci_mmap_page_range() expects the same kind of entry as coming 615 /* pci_mmap_page_range() expects the same kind of entry as coming
514 * from /proc/bus/pci/ which is a "user visible" value. If this is 616 * from /proc/bus/pci/ which is a "user visible" value. If this is
515 * different from the resource itself, arch will do necessary fixup. 617 * different from the resource itself, arch will do necessary fixup.
@@ -696,7 +798,7 @@ static struct bin_attribute pci_config_attr = {
696 .name = "config", 798 .name = "config",
697 .mode = S_IRUGO | S_IWUSR, 799 .mode = S_IRUGO | S_IWUSR,
698 }, 800 },
699 .size = 256, 801 .size = PCI_CFG_SPACE_SIZE,
700 .read = pci_read_config, 802 .read = pci_read_config,
701 .write = pci_write_config, 803 .write = pci_write_config,
702}; 804};
@@ -706,7 +808,7 @@ static struct bin_attribute pcie_config_attr = {
706 .name = "config", 808 .name = "config",
707 .mode = S_IRUGO | S_IWUSR, 809 .mode = S_IRUGO | S_IWUSR,
708 }, 810 },
709 .size = 4096, 811 .size = PCI_CFG_SPACE_EXP_SIZE,
710 .read = pci_read_config, 812 .read = pci_read_config,
711 .write = pci_write_config, 813 .write = pci_write_config,
712}; 814};
@@ -716,86 +818,103 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
716 return 0; 818 return 0;
717} 819}
718 820
821static int pci_create_capabilities_sysfs(struct pci_dev *dev)
822{
823 int retval;
824 struct bin_attribute *attr;
825
826 /* If the device has VPD, try to expose it in sysfs. */
827 if (dev->vpd) {
828 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
829 if (!attr)
830 return -ENOMEM;
831
832 attr->size = dev->vpd->len;
833 attr->attr.name = "vpd";
834 attr->attr.mode = S_IRUSR | S_IWUSR;
835 attr->read = pci_read_vpd;
836 attr->write = pci_write_vpd;
837 retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
838 if (retval) {
839 kfree(dev->vpd->attr);
840 return retval;
841 }
842 dev->vpd->attr = attr;
843 }
844
845 /* Active State Power Management */
846 pcie_aspm_create_sysfs_dev_files(dev);
847
848 return 0;
849}
850
719int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) 851int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
720{ 852{
721 struct bin_attribute *attr = NULL;
722 int retval; 853 int retval;
854 int rom_size = 0;
855 struct bin_attribute *attr;
723 856
724 if (!sysfs_initialized) 857 if (!sysfs_initialized)
725 return -EACCES; 858 return -EACCES;
726 859
727 if (pdev->cfg_size < 4096) 860 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
728 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); 861 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
729 else 862 else
730 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); 863 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
731 if (retval) 864 if (retval)
732 goto err; 865 goto err;
733 866
734 /* If the device has VPD, try to expose it in sysfs. */
735 if (pdev->vpd) {
736 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
737 if (attr) {
738 pdev->vpd->attr = attr;
739 attr->size = pdev->vpd->len;
740 attr->attr.name = "vpd";
741 attr->attr.mode = S_IRUSR | S_IWUSR;
742 attr->read = pci_read_vpd;
743 attr->write = pci_write_vpd;
744 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
745 if (retval)
746 goto err_vpd;
747 } else {
748 retval = -ENOMEM;
749 goto err_config_file;
750 }
751 }
752
753 retval = pci_create_resource_files(pdev); 867 retval = pci_create_resource_files(pdev);
754 if (retval) 868 if (retval)
755 goto err_vpd_file; 869 goto err_config_file;
870
871 if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
872 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
873 else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
874 rom_size = 0x20000;
756 875
757 /* If the device has a ROM, try to expose it in sysfs. */ 876 /* If the device has a ROM, try to expose it in sysfs. */
758 if (pci_resource_len(pdev, PCI_ROM_RESOURCE) || 877 if (rom_size) {
759 (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)) {
760 attr = kzalloc(sizeof(*attr), GFP_ATOMIC); 878 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
761 if (attr) { 879 if (!attr) {
762 pdev->rom_attr = attr;
763 attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
764 attr->attr.name = "rom";
765 attr->attr.mode = S_IRUSR;
766 attr->read = pci_read_rom;
767 attr->write = pci_write_rom;
768 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
769 if (retval)
770 goto err_rom;
771 } else {
772 retval = -ENOMEM; 880 retval = -ENOMEM;
773 goto err_resource_files; 881 goto err_resource_files;
774 } 882 }
883 attr->size = rom_size;
884 attr->attr.name = "rom";
885 attr->attr.mode = S_IRUSR;
886 attr->read = pci_read_rom;
887 attr->write = pci_write_rom;
888 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
889 if (retval) {
890 kfree(attr);
891 goto err_resource_files;
892 }
893 pdev->rom_attr = attr;
775 } 894 }
895
776 /* add platform-specific attributes */ 896 /* add platform-specific attributes */
777 if (pcibios_add_platform_entries(pdev)) 897 retval = pcibios_add_platform_entries(pdev);
898 if (retval)
778 goto err_rom_file; 899 goto err_rom_file;
779 900
780 pcie_aspm_create_sysfs_dev_files(pdev); 901 /* add sysfs entries for various capabilities */
902 retval = pci_create_capabilities_sysfs(pdev);
903 if (retval)
904 goto err_rom_file;
781 905
782 return 0; 906 return 0;
783 907
784err_rom_file: 908err_rom_file:
785 if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) 909 if (rom_size) {
786 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 910 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
787err_rom: 911 kfree(pdev->rom_attr);
788 kfree(pdev->rom_attr); 912 pdev->rom_attr = NULL;
913 }
789err_resource_files: 914err_resource_files:
790 pci_remove_resource_files(pdev); 915 pci_remove_resource_files(pdev);
791err_vpd_file:
792 if (pdev->vpd) {
793 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr);
794err_vpd:
795 kfree(pdev->vpd->attr);
796 }
797err_config_file: 916err_config_file:
798 if (pdev->cfg_size < 4096) 917 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
799 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); 918 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
800 else 919 else
801 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); 920 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
@@ -803,6 +922,16 @@ err:
803 return retval; 922 return retval;
804} 923}
805 924
925static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
926{
927 if (dev->vpd && dev->vpd->attr) {
928 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
929 kfree(dev->vpd->attr);
930 }
931
932 pcie_aspm_remove_sysfs_dev_files(dev);
933}
934
806/** 935/**
807 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files 936 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
808 * @pdev: device whose entries we should free 937 * @pdev: device whose entries we should free
@@ -811,27 +940,28 @@ err:
811 */ 940 */
812void pci_remove_sysfs_dev_files(struct pci_dev *pdev) 941void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
813{ 942{
943 int rom_size = 0;
944
814 if (!sysfs_initialized) 945 if (!sysfs_initialized)
815 return; 946 return;
816 947
817 pcie_aspm_remove_sysfs_dev_files(pdev); 948 pci_remove_capabilities_sysfs(pdev);
818 949
819 if (pdev->vpd) { 950 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
820 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr);
821 kfree(pdev->vpd->attr);
822 }
823 if (pdev->cfg_size < 4096)
824 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); 951 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
825 else 952 else
826 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); 953 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
827 954
828 pci_remove_resource_files(pdev); 955 pci_remove_resource_files(pdev);
829 956
830 if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) { 957 if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
831 if (pdev->rom_attr) { 958 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
832 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 959 else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
833 kfree(pdev->rom_attr); 960 rom_size = 0x20000;
834 } 961
962 if (rom_size && pdev->rom_attr) {
963 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
964 kfree(pdev->rom_attr);
835 } 965 }
836} 966}
837 967
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 44a46c92b721..21f2ac639cab 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -18,6 +18,7 @@
18#include <linux/log2.h> 18#include <linux/log2.h>
19#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h>
21#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
22#include "pci.h" 23#include "pci.h"
23 24
@@ -213,10 +214,13 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
213int pci_find_ext_capability(struct pci_dev *dev, int cap) 214int pci_find_ext_capability(struct pci_dev *dev, int cap)
214{ 215{
215 u32 header; 216 u32 header;
216 int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ 217 int ttl;
217 int pos = 0x100; 218 int pos = PCI_CFG_SPACE_SIZE;
218 219
219 if (dev->cfg_size <= 256) 220 /* minimum 8 bytes per capability */
221 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
222
223 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
220 return 0; 224 return 0;
221 225
222 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 226 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
@@ -234,7 +238,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
234 return pos; 238 return pos;
235 239
236 pos = PCI_EXT_CAP_NEXT(header); 240 pos = PCI_EXT_CAP_NEXT(header);
237 if (pos < 0x100) 241 if (pos < PCI_CFG_SPACE_SIZE)
238 break; 242 break;
239 243
240 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 244 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
@@ -572,6 +576,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
572 if (!ret) 576 if (!ret)
573 pci_update_current_state(dev); 577 pci_update_current_state(dev);
574 } 578 }
579 /* This device is quirked not to be put into D3, so
580 don't put it in D3 */
581 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
582 return 0;
575 583
576 error = pci_raw_set_power_state(dev, state); 584 error = pci_raw_set_power_state(dev, state);
577 585
@@ -1040,7 +1048,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1040 * @dev: PCI device to handle. 1048 * @dev: PCI device to handle.
1041 * @state: PCI state from which device will issue PME#. 1049 * @state: PCI state from which device will issue PME#.
1042 */ 1050 */
1043static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) 1051bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1044{ 1052{
1045 if (!dev->pm_cap) 1053 if (!dev->pm_cap)
1046 return false; 1054 return false;
@@ -1056,7 +1064,7 @@ static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1056 * The caller must verify that the device is capable of generating PME# before 1064 * The caller must verify that the device is capable of generating PME# before
1057 * calling this function with @enable equal to 'true'. 1065 * calling this function with @enable equal to 'true'.
1058 */ 1066 */
1059static void pci_pme_active(struct pci_dev *dev, bool enable) 1067void pci_pme_active(struct pci_dev *dev, bool enable)
1060{ 1068{
1061 u16 pmcsr; 1069 u16 pmcsr;
1062 1070
@@ -1123,18 +1131,37 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
1123} 1131}
1124 1132
1125/** 1133/**
1126 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into 1134 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1127 * a sleep state 1135 * @dev: PCI device to prepare
1128 * @dev: Device to handle. 1136 * @enable: True to enable wake-up event generation; false to disable
1129 * 1137 *
1130 * Choose the power state appropriate for the device depending on whether 1138 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1131 * it can wake up the system and/or is power manageable by the platform 1139 * and this function allows them to set that up cleanly - pci_enable_wake()
1132 * (PCI_D3hot is the default) and put the device into that state. 1140 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1141 * ordering constraints.
1142 *
1143 * This function only returns error code if the device is not capable of
1144 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1145 * enable wake-up power for it.
1133 */ 1146 */
1134int pci_prepare_to_sleep(struct pci_dev *dev) 1147int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1148{
1149 return pci_pme_capable(dev, PCI_D3cold) ?
1150 pci_enable_wake(dev, PCI_D3cold, enable) :
1151 pci_enable_wake(dev, PCI_D3hot, enable);
1152}
1153
1154/**
1155 * pci_target_state - find an appropriate low power state for a given PCI dev
1156 * @dev: PCI device
1157 *
1158 * Use underlying platform code to find a supported low power state for @dev.
1159 * If the platform can't manage @dev, return the deepest state from which it
1160 * can generate wake events, based on any available PME info.
1161 */
1162pci_power_t pci_target_state(struct pci_dev *dev)
1135{ 1163{
1136 pci_power_t target_state = PCI_D3hot; 1164 pci_power_t target_state = PCI_D3hot;
1137 int error;
1138 1165
1139 if (platform_pci_power_manageable(dev)) { 1166 if (platform_pci_power_manageable(dev)) {
1140 /* 1167 /*
@@ -1161,7 +1188,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1161 * to generate PME#. 1188 * to generate PME#.
1162 */ 1189 */
1163 if (!dev->pm_cap) 1190 if (!dev->pm_cap)
1164 return -EIO; 1191 return PCI_POWER_ERROR;
1165 1192
1166 if (dev->pme_support) { 1193 if (dev->pme_support) {
1167 while (target_state 1194 while (target_state
@@ -1170,6 +1197,25 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1170 } 1197 }
1171 } 1198 }
1172 1199
1200 return target_state;
1201}
1202
1203/**
1204 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1205 * @dev: Device to handle.
1206 *
1207 * Choose the power state appropriate for the device depending on whether
1208 * it can wake up the system and/or is power manageable by the platform
1209 * (PCI_D3hot is the default) and put the device into that state.
1210 */
1211int pci_prepare_to_sleep(struct pci_dev *dev)
1212{
1213 pci_power_t target_state = pci_target_state(dev);
1214 int error;
1215
1216 if (target_state == PCI_POWER_ERROR)
1217 return -EIO;
1218
1173 pci_enable_wake(dev, target_state, true); 1219 pci_enable_wake(dev, target_state, true);
1174 1220
1175 error = pci_set_power_state(dev, target_state); 1221 error = pci_set_power_state(dev, target_state);
@@ -1181,8 +1227,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1181} 1227}
1182 1228
1183/** 1229/**
1184 * pci_back_from_sleep - turn PCI device on during system-wide transition into 1230 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1185 * the working state a sleep state
1186 * @dev: Device to handle. 1231 * @dev: Device to handle.
1187 * 1232 *
1188 * Disable device's sytem wake-up capability and put it into D0. 1233 * Disable device's sytem wake-up capability and put it into D0.
@@ -1222,25 +1267,25 @@ void pci_pm_init(struct pci_dev *dev)
1222 dev->d1_support = false; 1267 dev->d1_support = false;
1223 dev->d2_support = false; 1268 dev->d2_support = false;
1224 if (!pci_no_d1d2(dev)) { 1269 if (!pci_no_d1d2(dev)) {
1225 if (pmc & PCI_PM_CAP_D1) { 1270 if (pmc & PCI_PM_CAP_D1)
1226 dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n");
1227 dev->d1_support = true; 1271 dev->d1_support = true;
1228 } 1272 if (pmc & PCI_PM_CAP_D2)
1229 if (pmc & PCI_PM_CAP_D2) {
1230 dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n");
1231 dev->d2_support = true; 1273 dev->d2_support = true;
1232 } 1274
1275 if (dev->d1_support || dev->d2_support)
1276 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1277 dev->d1_support ? " D1" : "",
1278 dev->d2_support ? " D2" : "");
1233 } 1279 }
1234 1280
1235 pmc &= PCI_PM_CAP_PME_MASK; 1281 pmc &= PCI_PM_CAP_PME_MASK;
1236 if (pmc) { 1282 if (pmc) {
1237 dev_printk(KERN_INFO, &dev->dev, 1283 dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
1238 "PME# supported from%s%s%s%s%s\n", 1284 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1239 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1285 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1240 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1286 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1241 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1287 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1242 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1288 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1243 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1244 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1289 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1245 /* 1290 /*
1246 * Make device's PM flags reflect the wake-up capability, but 1291 * Make device's PM flags reflect the wake-up capability, but
@@ -1255,6 +1300,43 @@ void pci_pm_init(struct pci_dev *dev)
1255 } 1300 }
1256} 1301}
1257 1302
1303/**
1304 * pci_enable_ari - enable ARI forwarding if hardware support it
1305 * @dev: the PCI device
1306 */
1307void pci_enable_ari(struct pci_dev *dev)
1308{
1309 int pos;
1310 u32 cap;
1311 u16 ctrl;
1312 struct pci_dev *bridge;
1313
1314 if (!dev->is_pcie || dev->devfn)
1315 return;
1316
1317 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1318 if (!pos)
1319 return;
1320
1321 bridge = dev->bus->self;
1322 if (!bridge || !bridge->is_pcie)
1323 return;
1324
1325 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
1326 if (!pos)
1327 return;
1328
1329 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1330 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1331 return;
1332
1333 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1334 ctrl |= PCI_EXP_DEVCTL2_ARI;
1335 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1336
1337 bridge->ari_enabled = 1;
1338}
1339
1258int 1340int
1259pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 1341pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1260{ 1342{
@@ -1338,11 +1420,10 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1338 return 0; 1420 return 0;
1339 1421
1340err_out: 1422err_out:
1341 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n", 1423 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
1342 bar, 1424 bar,
1343 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 1425 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1344 (unsigned long long)pci_resource_start(pdev, bar), 1426 &pdev->resource[bar]);
1345 (unsigned long long)pci_resource_end(pdev, bar));
1346 return -EBUSY; 1427 return -EBUSY;
1347} 1428}
1348 1429
@@ -1671,6 +1752,103 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
1671#endif 1752#endif
1672 1753
1673/** 1754/**
1755 * pci_execute_reset_function() - Reset a PCI device function
1756 * @dev: Device function to reset
1757 *
1758 * Some devices allow an individual function to be reset without affecting
1759 * other functions in the same device. The PCI device must be responsive
1760 * to PCI config space in order to use this function.
1761 *
1762 * The device function is presumed to be unused when this function is called.
1763 * Resetting the device will make the contents of PCI configuration space
1764 * random, so any caller of this must be prepared to reinitialise the
1765 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
1766 * etc.
1767 *
1768 * Returns 0 if the device function was successfully reset or -ENOTTY if the
1769 * device doesn't support resetting a single function.
1770 */
1771int pci_execute_reset_function(struct pci_dev *dev)
1772{
1773 u16 status;
1774 u32 cap;
1775 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1776
1777 if (!exppos)
1778 return -ENOTTY;
1779 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
1780 if (!(cap & PCI_EXP_DEVCAP_FLR))
1781 return -ENOTTY;
1782
1783 pci_block_user_cfg_access(dev);
1784
1785 /* Wait for Transaction Pending bit clean */
1786 msleep(100);
1787 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
1788 if (status & PCI_EXP_DEVSTA_TRPND) {
1789 dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
1790 "sleeping for 1 second\n");
1791 ssleep(1);
1792 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
1793 if (status & PCI_EXP_DEVSTA_TRPND)
1794 dev_info(&dev->dev, "Still busy after 1s; "
1795 "proceeding with reset anyway\n");
1796 }
1797
1798 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
1799 PCI_EXP_DEVCTL_BCR_FLR);
1800 mdelay(100);
1801
1802 pci_unblock_user_cfg_access(dev);
1803 return 0;
1804}
1805EXPORT_SYMBOL_GPL(pci_execute_reset_function);
1806
1807/**
1808 * pci_reset_function() - quiesce and reset a PCI device function
1809 * @dev: Device function to reset
1810 *
1811 * Some devices allow an individual function to be reset without affecting
1812 * other functions in the same device. The PCI device must be responsive
1813 * to PCI config space in order to use this function.
1814 *
1815 * This function does not just reset the PCI portion of a device, but
1816 * clears all the state associated with the device. This function differs
1817 * from pci_execute_reset_function in that it saves and restores device state
1818 * over the reset.
1819 *
1820 * Returns 0 if the device function was successfully reset or -ENOTTY if the
1821 * device doesn't support resetting a single function.
1822 */
1823int pci_reset_function(struct pci_dev *dev)
1824{
1825 u32 cap;
1826 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1827 int r;
1828
1829 if (!exppos)
1830 return -ENOTTY;
1831 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
1832 if (!(cap & PCI_EXP_DEVCAP_FLR))
1833 return -ENOTTY;
1834
1835 if (!dev->msi_enabled && !dev->msix_enabled)
1836 disable_irq(dev->irq);
1837 pci_save_state(dev);
1838
1839 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
1840
1841 r = pci_execute_reset_function(dev);
1842
1843 pci_restore_state(dev);
1844 if (!dev->msi_enabled && !dev->msix_enabled)
1845 enable_irq(dev->irq);
1846
1847 return r;
1848}
1849EXPORT_SYMBOL_GPL(pci_reset_function);
1850
1851/**
1674 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 1852 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
1675 * @dev: PCI device to query 1853 * @dev: PCI device to query
1676 * 1854 *
@@ -1858,6 +2036,9 @@ static int __devinit pci_init(void)
1858 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2036 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1859 pci_fixup_device(pci_fixup_final, dev); 2037 pci_fixup_device(pci_fixup_final, dev);
1860 } 2038 }
2039
2040 msi_init();
2041
1861 return 0; 2042 return 0;
1862} 2043}
1863 2044
@@ -1920,7 +2101,11 @@ EXPORT_SYMBOL(pci_select_bars);
1920EXPORT_SYMBOL(pci_set_power_state); 2101EXPORT_SYMBOL(pci_set_power_state);
1921EXPORT_SYMBOL(pci_save_state); 2102EXPORT_SYMBOL(pci_save_state);
1922EXPORT_SYMBOL(pci_restore_state); 2103EXPORT_SYMBOL(pci_restore_state);
2104EXPORT_SYMBOL(pci_pme_capable);
2105EXPORT_SYMBOL(pci_pme_active);
1923EXPORT_SYMBOL(pci_enable_wake); 2106EXPORT_SYMBOL(pci_enable_wake);
2107EXPORT_SYMBOL(pci_wake_from_d3);
2108EXPORT_SYMBOL(pci_target_state);
1924EXPORT_SYMBOL(pci_prepare_to_sleep); 2109EXPORT_SYMBOL(pci_prepare_to_sleep);
1925EXPORT_SYMBOL(pci_back_from_sleep); 2110EXPORT_SYMBOL(pci_back_from_sleep);
1926EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); 2111EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d807cd786f20..9de87e9f98f5 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,3 +1,9 @@
1#ifndef DRIVERS_PCI_H
2#define DRIVERS_PCI_H
3
4#define PCI_CFG_SPACE_SIZE 256
5#define PCI_CFG_SPACE_EXP_SIZE 4096
6
1/* Functions internal to the PCI core code */ 7/* Functions internal to the PCI core code */
2 8
3extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); 9extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
@@ -76,7 +82,13 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
76/* Functions for PCI Hotplug drivers to use */ 82/* Functions for PCI Hotplug drivers to use */
77extern unsigned int pci_do_scan_bus(struct pci_bus *bus); 83extern unsigned int pci_do_scan_bus(struct pci_bus *bus);
78 84
85#ifdef HAVE_PCI_LEGACY
86extern void pci_create_legacy_files(struct pci_bus *bus);
79extern void pci_remove_legacy_files(struct pci_bus *bus); 87extern void pci_remove_legacy_files(struct pci_bus *bus);
88#else
89static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
90static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
91#endif
80 92
81/* Lock for read/write access to pci device and bus lists */ 93/* Lock for read/write access to pci device and bus lists */
82extern struct rw_semaphore pci_bus_sem; 94extern struct rw_semaphore pci_bus_sem;
@@ -86,9 +98,11 @@ extern unsigned int pci_pm_d3_delay;
86#ifdef CONFIG_PCI_MSI 98#ifdef CONFIG_PCI_MSI
87void pci_no_msi(void); 99void pci_no_msi(void);
88extern void pci_msi_init_pci_dev(struct pci_dev *dev); 100extern void pci_msi_init_pci_dev(struct pci_dev *dev);
101extern void __devinit msi_init(void);
89#else 102#else
90static inline void pci_no_msi(void) { } 103static inline void pci_no_msi(void) { }
91static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } 104static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
105static inline void msi_init(void) { }
92#endif 106#endif
93 107
94#ifdef CONFIG_PCIEAER 108#ifdef CONFIG_PCIEAER
@@ -109,6 +123,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
109extern int pcie_mch_quirk; 123extern int pcie_mch_quirk;
110extern struct device_attribute pci_dev_attrs[]; 124extern struct device_attribute pci_dev_attrs[];
111extern struct device_attribute dev_attr_cpuaffinity; 125extern struct device_attribute dev_attr_cpuaffinity;
126extern struct device_attribute dev_attr_cpulistaffinity;
112 127
113/** 128/**
114 * pci_match_one_device - Tell if a PCI device structure has a matching 129 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -144,3 +159,16 @@ struct pci_slot_attribute {
144}; 159};
145#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) 160#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
146 161
162extern void pci_enable_ari(struct pci_dev *dev);
163/**
164 * pci_ari_enabled - query ARI forwarding status
165 * @dev: the PCI device
166 *
167 * Returns 1 if ARI forwarding is enabled, or 0 if not enabled;
168 */
169static inline int pci_ari_enabled(struct pci_dev *dev)
170{
171 return dev->ari_enabled;
172}
173
174#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 77036f46acfe..e390707661dd 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -105,7 +105,7 @@ static irqreturn_t aer_irq(int irq, void *context)
105 unsigned long flags; 105 unsigned long flags;
106 int pos; 106 int pos;
107 107
108 pos = pci_find_aer_capability(pdev->port); 108 pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR);
109 /* 109 /*
110 * Must lock access to Root Error Status Reg, Root Error ID Reg, 110 * Must lock access to Root Error Status Reg, Root Error ID Reg,
111 * and Root error producer/consumer index 111 * and Root error producer/consumer index
@@ -252,7 +252,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
252 u32 status; 252 u32 status;
253 int pos; 253 int pos;
254 254
255 pos = pci_find_aer_capability(dev); 255 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
256 256
257 /* Disable Root's interrupt in response to error messages */ 257 /* Disable Root's interrupt in response to error messages */
258 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); 258 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0);
@@ -316,7 +316,7 @@ static void aer_error_resume(struct pci_dev *dev)
316 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); 316 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
317 317
318 /* Clean AER Root Error Status */ 318 /* Clean AER Root Error Status */
319 pos = pci_find_aer_capability(dev); 319 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
320 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 320 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
321 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 321 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
322 if (dev->error_state == pci_channel_io_normal) 322 if (dev->error_state == pci_channel_io_normal)
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 30f581b8791f..6dd7b13e9808 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -36,12 +36,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
36 if (acpi_pci_disabled) 36 if (acpi_pci_disabled)
37 return -1; 37 return -1;
38 38
39 /* Find root host bridge */ 39 handle = acpi_find_root_bridge_handle(pdev);
40 while (pdev->bus->self)
41 pdev = pdev->bus->self;
42 handle = acpi_get_pci_rootbridge_handle(
43 pci_domain_nr(pdev->bus), pdev->bus->number);
44
45 if (handle) { 40 if (handle) {
46 pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); 41 pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT);
47 status = pci_osc_control_set(handle, 42 status = pci_osc_control_set(handle,
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ee5e7b5176d0..dfc63d01f20a 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -28,41 +28,15 @@
28static int forceload; 28static int forceload;
29module_param(forceload, bool, 0); 29module_param(forceload, bool, 0);
30 30
31#define PCI_CFG_SPACE_SIZE (0x100)
32int pci_find_aer_capability(struct pci_dev *dev)
33{
34 int pos;
35 u32 reg32 = 0;
36
37 /* Check if it's a pci-express device */
38 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
39 if (!pos)
40 return 0;
41
42 /* Check if it supports pci-express AER */
43 pos = PCI_CFG_SPACE_SIZE;
44 while (pos) {
45 if (pci_read_config_dword(dev, pos, &reg32))
46 return 0;
47
48 /* some broken boards return ~0 */
49 if (reg32 == 0xffffffff)
50 return 0;
51
52 if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR)
53 break;
54
55 pos = reg32 >> 20;
56 }
57
58 return pos;
59}
60
61int pci_enable_pcie_error_reporting(struct pci_dev *dev) 31int pci_enable_pcie_error_reporting(struct pci_dev *dev)
62{ 32{
63 u16 reg16 = 0; 33 u16 reg16 = 0;
64 int pos; 34 int pos;
65 35
36 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
37 if (!pos)
38 return -EIO;
39
66 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 40 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
67 if (!pos) 41 if (!pos)
68 return -EIO; 42 return -EIO;
@@ -102,7 +76,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
102 int pos; 76 int pos;
103 u32 status, mask; 77 u32 status, mask;
104 78
105 pos = pci_find_aer_capability(dev); 79 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
106 if (!pos) 80 if (!pos)
107 return -EIO; 81 return -EIO;
108 82
@@ -123,7 +97,7 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
123 int pos; 97 int pos;
124 u32 status; 98 u32 status;
125 99
126 pos = pci_find_aer_capability(dev); 100 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
127 if (!pos) 101 if (!pos)
128 return -EIO; 102 return -EIO;
129 103
@@ -502,7 +476,7 @@ static void handle_error_source(struct pcie_device * aerdev,
502 * Correctable error does not need software intevention. 476 * Correctable error does not need software intevention.
503 * No need to go through error recovery process. 477 * No need to go through error recovery process.
504 */ 478 */
505 pos = pci_find_aer_capability(dev); 479 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
506 if (pos) 480 if (pos)
507 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 481 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
508 info.status); 482 info.status);
@@ -542,7 +516,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
542 reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); 516 reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
543 pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); 517 pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
544 518
545 aer_pos = pci_find_aer_capability(pdev); 519 aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
546 /* Clear error status */ 520 /* Clear error status */
547 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32); 521 pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
548 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); 522 pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
@@ -579,7 +553,7 @@ static void disable_root_aer(struct aer_rpc *rpc)
579 u32 reg32; 553 u32 reg32;
580 int pos; 554 int pos;
581 555
582 pos = pci_find_aer_capability(pdev); 556 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
583 /* Disable Root's interrupt in response to error messages */ 557 /* Disable Root's interrupt in response to error messages */
584 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); 558 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
585 559
@@ -618,7 +592,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
618{ 592{
619 int pos; 593 int pos;
620 594
621 pos = pci_find_aer_capability(dev); 595 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
622 596
623 /* The device might not support AER */ 597 /* The device might not support AER */
624 if (!pos) 598 if (!pos)
@@ -755,7 +729,6 @@ int aer_init(struct pcie_device *dev)
755 return AER_SUCCESS; 729 return AER_SUCCESS;
756} 730}
757 731
758EXPORT_SYMBOL_GPL(pci_find_aer_capability);
759EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); 732EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
760EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); 733EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
761EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); 734EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f82495583e63..8f63f4c6b85f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -55,7 +55,7 @@ struct pcie_link_state {
55 struct endpoint_state endpoints[8]; 55 struct endpoint_state endpoints[8];
56}; 56};
57 57
58static int aspm_disabled; 58static int aspm_disabled, aspm_force;
59static DEFINE_MUTEX(aspm_lock); 59static DEFINE_MUTEX(aspm_lock);
60static LIST_HEAD(link_list); 60static LIST_HEAD(link_list);
61 61
@@ -510,6 +510,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
510{ 510{
511 struct pci_dev *child_dev; 511 struct pci_dev *child_dev;
512 int child_pos; 512 int child_pos;
513 u32 reg32;
513 514
514 /* 515 /*
515 * Some functions in a slot might not all be PCIE functions, very 516 * Some functions in a slot might not all be PCIE functions, very
@@ -519,6 +520,19 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
519 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 520 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
520 if (!child_pos) 521 if (!child_pos)
521 return -EINVAL; 522 return -EINVAL;
523
524 /*
525 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
526 * RBER bit to determine if a function is 1.1 version device
527 */
528 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
529 &reg32);
530 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
531 dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
532 " on pre-1.1 PCIe device. You can enable it"
533 " with 'pcie_aspm=force'\n");
534 return -EINVAL;
535 }
522 } 536 }
523 return 0; 537 return 0;
524} 538}
@@ -802,11 +816,23 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
802 816
803static int __init pcie_aspm_disable(char *str) 817static int __init pcie_aspm_disable(char *str)
804{ 818{
805 aspm_disabled = 1; 819 if (!strcmp(str, "off")) {
820 aspm_disabled = 1;
821 printk(KERN_INFO "PCIe ASPM is disabled\n");
822 } else if (!strcmp(str, "force")) {
823 aspm_force = 1;
824 printk(KERN_INFO "PCIe ASPM is forcedly enabled\n");
825 }
806 return 1; 826 return 1;
807} 827}
808 828
809__setup("pcie_noaspm", pcie_aspm_disable); 829__setup("pcie_aspm=", pcie_aspm_disable);
830
831void pcie_no_aspm(void)
832{
833 if (!aspm_force)
834 aspm_disabled = 1;
835}
810 836
811#ifdef CONFIG_ACPI 837#ifdef CONFIG_ACPI
812#include <acpi/acpi_bus.h> 838#include <acpi/acpi_bus.h>
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 3656e0349dd1..2529f3f2ea5a 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,7 +25,6 @@
25#define PCIE_CAPABILITIES_REG 0x2 25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14 26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4 27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCI_CFG_SPACE_SIZE 256
29 28
30#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 29#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
31 30
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 890f0d2b370a..2e091e014829 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -195,24 +195,11 @@ static int get_port_device_capability(struct pci_dev *dev)
195 /* PME Capable - root port capability */ 195 /* PME Capable - root port capability */
196 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT) 196 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
197 services |= PCIE_PORT_SERVICE_PME; 197 services |= PCIE_PORT_SERVICE_PME;
198 198
199 pos = PCI_CFG_SPACE_SIZE; 199 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
200 while (pos) { 200 services |= PCIE_PORT_SERVICE_AER;
201 pci_read_config_dword(dev, pos, &reg32); 201 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
202 switch (reg32 & 0xffff) { 202 services |= PCIE_PORT_SERVICE_VC;
203 case PCI_EXT_CAP_ID_ERR:
204 services |= PCIE_PORT_SERVICE_AER;
205 pos = reg32 >> 20;
206 break;
207 case PCI_EXT_CAP_ID_VC:
208 services |= PCIE_PORT_SERVICE_VC;
209 pos = reg32 >> 20;
210 break;
211 default:
212 pos = 0;
213 break;
214 }
215 }
216 203
217 return services; 204 return services;
218} 205}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 367c9c20000d..584422da8d8b 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
91 91
92 pci_set_master(dev); 92 pci_set_master(dev);
93 if (!dev->irq && dev->pin) { 93 if (!dev->irq && dev->pin) {
94 dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " 94 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
95 "check vendor BIOS\n", dev->vendor, dev->device); 95 "check vendor BIOS\n", dev->vendor, dev->device);
96 } 96 }
97 if (pcie_port_device_register(dev)) { 97 if (pcie_port_device_register(dev)) {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b1724cf31b66..003a9b3c293f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -14,8 +14,6 @@
14 14
15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
16#define CARDBUS_RESERVE_BUSNR 3 16#define CARDBUS_RESERVE_BUSNR 3
17#define PCI_CFG_SPACE_SIZE 256
18#define PCI_CFG_SPACE_EXP_SIZE 4096
19 17
20/* Ugh. Need to stop exporting this to modules. */ 18/* Ugh. Need to stop exporting this to modules. */
21LIST_HEAD(pci_root_buses); 19LIST_HEAD(pci_root_buses);
@@ -44,50 +42,6 @@ int no_pci_devices(void)
44} 42}
45EXPORT_SYMBOL(no_pci_devices); 43EXPORT_SYMBOL(no_pci_devices);
46 44
47#ifdef HAVE_PCI_LEGACY
48/**
49 * pci_create_legacy_files - create legacy I/O port and memory files
50 * @b: bus to create files under
51 *
52 * Some platforms allow access to legacy I/O port and ISA memory space on
53 * a per-bus basis. This routine creates the files and ties them into
54 * their associated read, write and mmap files from pci-sysfs.c
55 */
56static void pci_create_legacy_files(struct pci_bus *b)
57{
58 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
59 GFP_ATOMIC);
60 if (b->legacy_io) {
61 b->legacy_io->attr.name = "legacy_io";
62 b->legacy_io->size = 0xffff;
63 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
64 b->legacy_io->read = pci_read_legacy_io;
65 b->legacy_io->write = pci_write_legacy_io;
66 device_create_bin_file(&b->dev, b->legacy_io);
67
68 /* Allocated above after the legacy_io struct */
69 b->legacy_mem = b->legacy_io + 1;
70 b->legacy_mem->attr.name = "legacy_mem";
71 b->legacy_mem->size = 1024*1024;
72 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
73 b->legacy_mem->mmap = pci_mmap_legacy_mem;
74 device_create_bin_file(&b->dev, b->legacy_mem);
75 }
76}
77
78void pci_remove_legacy_files(struct pci_bus *b)
79{
80 if (b->legacy_io) {
81 device_remove_bin_file(&b->dev, b->legacy_io);
82 device_remove_bin_file(&b->dev, b->legacy_mem);
83 kfree(b->legacy_io); /* both are allocated here */
84 }
85}
86#else /* !HAVE_PCI_LEGACY */
87static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
88void pci_remove_legacy_files(struct pci_bus *bus) { return; }
89#endif /* HAVE_PCI_LEGACY */
90
91/* 45/*
92 * PCI Bus Class Devices 46 * PCI Bus Class Devices
93 */ 47 */
@@ -163,12 +117,9 @@ static inline unsigned int pci_calc_resource_flags(unsigned int flags)
163 return IORESOURCE_MEM; 117 return IORESOURCE_MEM;
164} 118}
165 119
166/* 120static u64 pci_size(u64 base, u64 maxbase, u64 mask)
167 * Find the extent of a PCI decode..
168 */
169static u32 pci_size(u32 base, u32 maxbase, u32 mask)
170{ 121{
171 u32 size = mask & maxbase; /* Find the significant bits */ 122 u64 size = mask & maxbase; /* Find the significant bits */
172 if (!size) 123 if (!size)
173 return 0; 124 return 0;
174 125
@@ -184,135 +135,148 @@ static u32 pci_size(u32 base, u32 maxbase, u32 mask)
184 return size; 135 return size;
185} 136}
186 137
187static u64 pci_size64(u64 base, u64 maxbase, u64 mask) 138enum pci_bar_type {
188{ 139 pci_bar_unknown, /* Standard PCI BAR probe */
189 u64 size = mask & maxbase; /* Find the significant bits */ 140 pci_bar_io, /* An io port BAR */
190 if (!size) 141 pci_bar_mem32, /* A 32-bit memory BAR */
191 return 0; 142 pci_bar_mem64, /* A 64-bit memory BAR */
143};
192 144
193 /* Get the lowest of them to find the decode size, and 145static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
194 from that the extent. */ 146{
195 size = (size & ~(size-1)) - 1; 147 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
148 res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
149 return pci_bar_io;
150 }
196 151
197 /* base == maxbase can be valid only if the BAR has 152 res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
198 already been programmed with all 1s. */
199 if (base == maxbase && ((base | size) & mask) != mask)
200 return 0;
201 153
202 return size; 154 if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
155 return pci_bar_mem64;
156 return pci_bar_mem32;
203} 157}
204 158
205static inline int is_64bit_memory(u32 mask) 159/*
160 * If the type is not unknown, we assume that the lowest bit is 'enable'.
161 * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit.
162 */
163static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
164 struct resource *res, unsigned int pos)
206{ 165{
207 if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == 166 u32 l, sz, mask;
208 (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
209 return 1;
210 return 0;
211}
212 167
213static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) 168 mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0;
214{
215 unsigned int pos, reg, next;
216 u32 l, sz;
217 struct resource *res;
218 169
219 for(pos=0; pos<howmany; pos = next) { 170 res->name = pci_name(dev);
220 u64 l64;
221 u64 sz64;
222 u32 raw_sz;
223 171
224 next = pos+1; 172 pci_read_config_dword(dev, pos, &l);
225 res = &dev->resource[pos]; 173 pci_write_config_dword(dev, pos, mask);
226 res->name = pci_name(dev); 174 pci_read_config_dword(dev, pos, &sz);
227 reg = PCI_BASE_ADDRESS_0 + (pos << 2); 175 pci_write_config_dword(dev, pos, l);
228 pci_read_config_dword(dev, reg, &l); 176
229 pci_write_config_dword(dev, reg, ~0); 177 /*
230 pci_read_config_dword(dev, reg, &sz); 178 * All bits set in sz means the device isn't working properly.
231 pci_write_config_dword(dev, reg, l); 179 * If the BAR isn't implemented, all bits must be 0. If it's a
232 if (!sz || sz == 0xffffffff) 180 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
233 continue; 181 * 1 must be clear.
234 if (l == 0xffffffff) 182 */
235 l = 0; 183 if (!sz || sz == 0xffffffff)
236 raw_sz = sz; 184 goto fail;
237 if ((l & PCI_BASE_ADDRESS_SPACE) == 185
238 PCI_BASE_ADDRESS_SPACE_MEMORY) { 186 /*
239 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); 187 * I don't know how l can have all bits set. Copied from old code.
240 /* 188 * Maybe it fixes a bug on some ancient platform.
241 * For 64bit prefetchable memory sz could be 0, if the 189 */
242 * real size is bigger than 4G, so we need to check 190 if (l == 0xffffffff)
243 * szhi for that. 191 l = 0;
244 */ 192
245 if (!is_64bit_memory(l) && !sz) 193 if (type == pci_bar_unknown) {
246 continue; 194 type = decode_bar(res, l);
247 res->start = l & PCI_BASE_ADDRESS_MEM_MASK; 195 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
248 res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; 196 if (type == pci_bar_io) {
197 l &= PCI_BASE_ADDRESS_IO_MASK;
198 mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
249 } else { 199 } else {
250 sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); 200 l &= PCI_BASE_ADDRESS_MEM_MASK;
251 if (!sz) 201 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
252 continue;
253 res->start = l & PCI_BASE_ADDRESS_IO_MASK;
254 res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
255 } 202 }
256 res->end = res->start + (unsigned long) sz; 203 } else {
257 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; 204 res->flags |= (l & IORESOURCE_ROM_ENABLE);
258 if (is_64bit_memory(l)) { 205 l &= PCI_ROM_ADDRESS_MASK;
259 u32 szhi, lhi; 206 mask = (u32)PCI_ROM_ADDRESS_MASK;
260 207 }
261 pci_read_config_dword(dev, reg+4, &lhi); 208
262 pci_write_config_dword(dev, reg+4, ~0); 209 if (type == pci_bar_mem64) {
263 pci_read_config_dword(dev, reg+4, &szhi); 210 u64 l64 = l;
264 pci_write_config_dword(dev, reg+4, lhi); 211 u64 sz64 = sz;
265 sz64 = ((u64)szhi << 32) | raw_sz; 212 u64 mask64 = mask | (u64)~0 << 32;
266 l64 = ((u64)lhi << 32) | l; 213
267 sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); 214 pci_read_config_dword(dev, pos + 4, &l);
268 next++; 215 pci_write_config_dword(dev, pos + 4, ~0);
269#if BITS_PER_LONG == 64 216 pci_read_config_dword(dev, pos + 4, &sz);
270 if (!sz64) { 217 pci_write_config_dword(dev, pos + 4, l);
271 res->start = 0; 218
272 res->end = 0; 219 l64 |= ((u64)l << 32);
273 res->flags = 0; 220 sz64 |= ((u64)sz << 32);
274 continue; 221
275 } 222 sz64 = pci_size(l64, sz64, mask64);
276 res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; 223
277 res->end = res->start + sz64; 224 if (!sz64)
278#else 225 goto fail;
279 if (sz64 > 0x100000000ULL) { 226
280 dev_err(&dev->dev, "BAR %d: can't handle 64-bit" 227 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
281 " BAR\n", pos); 228 dev_err(&dev->dev, "can't handle 64-bit BAR\n");
282 res->start = 0; 229 goto fail;
283 res->flags = 0; 230 } else if ((sizeof(resource_size_t) < 8) && l) {
284 } else if (lhi) { 231 /* Address above 32-bit boundary; disable the BAR */
285 /* 64-bit wide address, treat as disabled */ 232 pci_write_config_dword(dev, pos, 0);
286 pci_write_config_dword(dev, reg, 233 pci_write_config_dword(dev, pos + 4, 0);
287 l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); 234 res->start = 0;
288 pci_write_config_dword(dev, reg+4, 0); 235 res->end = sz64;
289 res->start = 0; 236 } else {
290 res->end = sz; 237 res->start = l64;
291 } 238 res->end = l64 + sz64;
292#endif 239 dev_printk(KERN_DEBUG, &dev->dev,
240 "reg %x 64bit mmio: %pR\n", pos, res);
293 } 241 }
242 } else {
243 sz = pci_size(l, sz, mask);
244
245 if (!sz)
246 goto fail;
247
248 res->start = l;
249 res->end = l + sz;
250
251 dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
252 (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
253 res);
294 } 254 }
255
256 out:
257 return (type == pci_bar_mem64) ? 1 : 0;
258 fail:
259 res->flags = 0;
260 goto out;
261}
262
263static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
264{
265 unsigned int pos, reg;
266
267 for (pos = 0; pos < howmany; pos++) {
268 struct resource *res = &dev->resource[pos];
269 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
270 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
271 }
272
295 if (rom) { 273 if (rom) {
274 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
296 dev->rom_base_reg = rom; 275 dev->rom_base_reg = rom;
297 res = &dev->resource[PCI_ROM_RESOURCE]; 276 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
298 res->name = pci_name(dev); 277 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
299 pci_read_config_dword(dev, rom, &l); 278 IORESOURCE_SIZEALIGN;
300 pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE); 279 __pci_read_base(dev, pci_bar_mem32, res, rom);
301 pci_read_config_dword(dev, rom, &sz);
302 pci_write_config_dword(dev, rom, l);
303 if (l == 0xffffffff)
304 l = 0;
305 if (sz && sz != 0xffffffff) {
306 sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
307 if (sz) {
308 res->flags = (l & IORESOURCE_ROM_ENABLE) |
309 IORESOURCE_MEM | IORESOURCE_PREFETCH |
310 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
311 IORESOURCE_SIZEALIGN;
312 res->start = l & PCI_ROM_ADDRESS_MASK;
313 res->end = res->start + (unsigned long) sz;
314 }
315 }
316 } 280 }
317} 281}
318 282
@@ -334,9 +298,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
334 child->resource[i] = child->parent->resource[i - 3]; 298 child->resource[i] = child->parent->resource[i - 3];
335 } 299 }
336 300
337 for(i=0; i<3; i++)
338 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
339
340 res = child->resource[0]; 301 res = child->resource[0];
341 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); 302 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
342 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); 303 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
@@ -357,6 +318,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
357 res->start = base; 318 res->start = base;
358 if (!res->end) 319 if (!res->end)
359 res->end = limit + 0xfff; 320 res->end = limit + 0xfff;
321 dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
360 } 322 }
361 323
362 res = child->resource[1]; 324 res = child->resource[1];
@@ -368,6 +330,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
368 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; 330 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
369 res->start = base; 331 res->start = base;
370 res->end = limit + 0xfffff; 332 res->end = limit + 0xfffff;
333 dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
334 res);
371 } 335 }
372 336
373 res = child->resource[2]; 337 res = child->resource[2];
@@ -403,6 +367,9 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
403 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; 367 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
404 res->start = base; 368 res->start = base;
405 res->end = limit + 0xfffff; 369 res->end = limit + 0xfffff;
370 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
371 (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
372 res);
406 } 373 }
407} 374}
408 375
@@ -510,19 +477,27 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
510 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 477 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
511 u32 buses, i, j = 0; 478 u32 buses, i, j = 0;
512 u16 bctl; 479 u16 bctl;
480 int broken = 0;
513 481
514 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 482 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
515 483
516 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", 484 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
517 buses & 0xffffff, pass); 485 buses & 0xffffff, pass);
518 486
487 /* Check if setup is sensible at all */
488 if (!pass &&
489 ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
490 dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
491 broken = 1;
492 }
493
519 /* Disable MasterAbortMode during probing to avoid reporting 494 /* Disable MasterAbortMode during probing to avoid reporting
520 of bus errors (in some architectures) */ 495 of bus errors (in some architectures) */
521 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 496 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
522 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 497 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
523 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 498 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
524 499
525 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) { 500 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
526 unsigned int cmax, busnr; 501 unsigned int cmax, busnr;
527 /* 502 /*
528 * Bus already configured by firmware, process it in the first 503 * Bus already configured by firmware, process it in the first
@@ -560,7 +535,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
560 * do in the second pass. 535 * do in the second pass.
561 */ 536 */
562 if (!pass) { 537 if (!pass) {
563 if (pcibios_assign_all_busses()) 538 if (pcibios_assign_all_busses() || broken)
564 /* Temporarily disable forwarding of the 539 /* Temporarily disable forwarding of the
565 configuration cycles on all bridges in 540 configuration cycles on all bridges in
566 this bus segment to avoid possible 541 this bus segment to avoid possible
@@ -723,7 +698,7 @@ static int pci_setup_device(struct pci_dev * dev)
723 dev->class = class; 698 dev->class = class;
724 class >>= 8; 699 class >>= 8;
725 700
726 dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", 701 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
727 dev->vendor, dev->device, class, dev->hdr_type); 702 dev->vendor, dev->device, class, dev->hdr_type);
728 703
729 /* "Unknown power state" */ 704 /* "Unknown power state" */
@@ -805,6 +780,11 @@ static int pci_setup_device(struct pci_dev * dev)
805 return 0; 780 return 0;
806} 781}
807 782
783static void pci_release_capabilities(struct pci_dev *dev)
784{
785 pci_vpd_release(dev);
786}
787
808/** 788/**
809 * pci_release_dev - free a pci device structure when all users of it are finished. 789 * pci_release_dev - free a pci device structure when all users of it are finished.
810 * @dev: device that's been disconnected 790 * @dev: device that's been disconnected
@@ -817,7 +797,7 @@ static void pci_release_dev(struct device *dev)
817 struct pci_dev *pci_dev; 797 struct pci_dev *pci_dev;
818 798
819 pci_dev = to_pci_dev(dev); 799 pci_dev = to_pci_dev(dev);
820 pci_vpd_release(pci_dev); 800 pci_release_capabilities(pci_dev);
821 kfree(pci_dev); 801 kfree(pci_dev);
822} 802}
823 803
@@ -848,8 +828,9 @@ static void set_pcie_port_type(struct pci_dev *pdev)
848int pci_cfg_space_size_ext(struct pci_dev *dev) 828int pci_cfg_space_size_ext(struct pci_dev *dev)
849{ 829{
850 u32 status; 830 u32 status;
831 int pos = PCI_CFG_SPACE_SIZE;
851 832
852 if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) 833 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
853 goto fail; 834 goto fail;
854 if (status == 0xffffffff) 835 if (status == 0xffffffff)
855 goto fail; 836 goto fail;
@@ -897,8 +878,6 @@ struct pci_dev *alloc_pci_dev(void)
897 878
898 INIT_LIST_HEAD(&dev->bus_list); 879 INIT_LIST_HEAD(&dev->bus_list);
899 880
900 pci_msi_init_pci_dev(dev);
901
902 return dev; 881 return dev;
903} 882}
904EXPORT_SYMBOL(alloc_pci_dev); 883EXPORT_SYMBOL(alloc_pci_dev);
@@ -910,6 +889,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
910static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 889static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
911{ 890{
912 struct pci_dev *dev; 891 struct pci_dev *dev;
892 struct pci_slot *slot;
913 u32 l; 893 u32 l;
914 u8 hdr_type; 894 u8 hdr_type;
915 int delay = 1; 895 int delay = 1;
@@ -958,6 +938,10 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
958 dev->error_state = pci_channel_io_normal; 938 dev->error_state = pci_channel_io_normal;
959 set_pcie_port_type(dev); 939 set_pcie_port_type(dev);
960 940
941 list_for_each_entry(slot, &bus->slots, list)
942 if (PCI_SLOT(devfn) == slot->number)
943 dev->slot = slot;
944
961 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 945 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
962 set this higher, assuming the system even supports it. */ 946 set this higher, assuming the system even supports it. */
963 dev->dma_mask = 0xffffffff; 947 dev->dma_mask = 0xffffffff;
@@ -966,9 +950,22 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
966 return NULL; 950 return NULL;
967 } 951 }
968 952
953 return dev;
954}
955
956static void pci_init_capabilities(struct pci_dev *dev)
957{
958 /* MSI/MSI-X list */
959 pci_msi_init_pci_dev(dev);
960
961 /* Power Management */
962 pci_pm_init(dev);
963
964 /* Vital Product Data */
969 pci_vpd_pci22_init(dev); 965 pci_vpd_pci22_init(dev);
970 966
971 return dev; 967 /* Alternative Routing-ID Forwarding */
968 pci_enable_ari(dev);
972} 969}
973 970
974void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 971void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -987,8 +984,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
987 /* Fix up broken headers */ 984 /* Fix up broken headers */
988 pci_fixup_device(pci_fixup_header, dev); 985 pci_fixup_device(pci_fixup_header, dev);
989 986
990 /* Initialize power management of the device */ 987 /* Initialize various capabilities */
991 pci_pm_init(dev); 988 pci_init_capabilities(dev);
992 989
993 /* 990 /*
994 * Add the device to our list of discovered devices 991 * Add the device to our list of discovered devices
@@ -1053,7 +1050,8 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
1053 } 1050 }
1054 } 1051 }
1055 1052
1056 if (bus->self) 1053 /* only one slot has pcie device */
1054 if (bus->self && nr)
1057 pcie_aspm_init_link_state(bus->self); 1055 pcie_aspm_init_link_state(bus->self);
1058 1056
1059 return nr; 1057 return nr;
@@ -1195,8 +1193,11 @@ EXPORT_SYMBOL(pci_scan_bridge);
1195EXPORT_SYMBOL_GPL(pci_scan_child_bus); 1193EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1196#endif 1194#endif
1197 1195
1198static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b) 1196static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1199{ 1197{
1198 const struct pci_dev *a = to_pci_dev(d_a);
1199 const struct pci_dev *b = to_pci_dev(d_b);
1200
1200 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; 1201 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1201 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; 1202 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
1202 1203
@@ -1209,50 +1210,7 @@ static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev
1209 return 0; 1210 return 0;
1210} 1211}
1211 1212
1212/*
1213 * Yes, this forcably breaks the klist abstraction temporarily. It
1214 * just wants to sort the klist, not change reference counts and
1215 * take/drop locks rapidly in the process. It does all this while
1216 * holding the lock for the list, so objects can't otherwise be
1217 * added/removed while we're swizzling.
1218 */
1219static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
1220{
1221 struct list_head *pos;
1222 struct klist_node *n;
1223 struct device *dev;
1224 struct pci_dev *b;
1225
1226 list_for_each(pos, list) {
1227 n = container_of(pos, struct klist_node, n_node);
1228 dev = container_of(n, struct device, knode_bus);
1229 b = to_pci_dev(dev);
1230 if (pci_sort_bf_cmp(a, b) <= 0) {
1231 list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
1232 return;
1233 }
1234 }
1235 list_move_tail(&a->dev.knode_bus.n_node, list);
1236}
1237
1238void __init pci_sort_breadthfirst(void) 1213void __init pci_sort_breadthfirst(void)
1239{ 1214{
1240 LIST_HEAD(sorted_devices); 1215 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1241 struct list_head *pos, *tmp;
1242 struct klist_node *n;
1243 struct device *dev;
1244 struct pci_dev *pdev;
1245 struct klist *device_klist;
1246
1247 device_klist = bus_get_device_klist(&pci_bus_type);
1248
1249 spin_lock(&device_klist->k_lock);
1250 list_for_each_safe(pos, tmp, &device_klist->k_list) {
1251 n = container_of(pos, struct klist_node, n_node);
1252 dev = container_of(n, struct device, knode_bus);
1253 pdev = to_pci_dev(dev);
1254 pci_insertion_sort_klist(pdev, &sorted_devices);
1255 }
1256 list_splice(&sorted_devices, &device_klist->k_list);
1257 spin_unlock(&device_klist->k_lock);
1258} 1216}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 4400dffbd93a..e1098c302c45 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -88,7 +88,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
88 if ((pos & 3) && cnt > 2) { 88 if ((pos & 3) && cnt > 2) {
89 unsigned short val; 89 unsigned short val;
90 pci_user_read_config_word(dev, pos, &val); 90 pci_user_read_config_word(dev, pos, &val);
91 __put_user(cpu_to_le16(val), (unsigned short __user *) buf); 91 __put_user(cpu_to_le16(val), (__le16 __user *) buf);
92 buf += 2; 92 buf += 2;
93 pos += 2; 93 pos += 2;
94 cnt -= 2; 94 cnt -= 2;
@@ -97,7 +97,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
97 while (cnt >= 4) { 97 while (cnt >= 4) {
98 unsigned int val; 98 unsigned int val;
99 pci_user_read_config_dword(dev, pos, &val); 99 pci_user_read_config_dword(dev, pos, &val);
100 __put_user(cpu_to_le32(val), (unsigned int __user *) buf); 100 __put_user(cpu_to_le32(val), (__le32 __user *) buf);
101 buf += 4; 101 buf += 4;
102 pos += 4; 102 pos += 4;
103 cnt -= 4; 103 cnt -= 4;
@@ -106,7 +106,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
106 if (cnt >= 2) { 106 if (cnt >= 2) {
107 unsigned short val; 107 unsigned short val;
108 pci_user_read_config_word(dev, pos, &val); 108 pci_user_read_config_word(dev, pos, &val);
109 __put_user(cpu_to_le16(val), (unsigned short __user *) buf); 109 __put_user(cpu_to_le16(val), (__le16 __user *) buf);
110 buf += 2; 110 buf += 2;
111 pos += 2; 111 pos += 2;
112 cnt -= 2; 112 cnt -= 2;
@@ -156,8 +156,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
156 } 156 }
157 157
158 if ((pos & 3) && cnt > 2) { 158 if ((pos & 3) && cnt > 2) {
159 unsigned short val; 159 __le16 val;
160 __get_user(val, (unsigned short __user *) buf); 160 __get_user(val, (__le16 __user *) buf);
161 pci_user_write_config_word(dev, pos, le16_to_cpu(val)); 161 pci_user_write_config_word(dev, pos, le16_to_cpu(val));
162 buf += 2; 162 buf += 2;
163 pos += 2; 163 pos += 2;
@@ -165,8 +165,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
165 } 165 }
166 166
167 while (cnt >= 4) { 167 while (cnt >= 4) {
168 unsigned int val; 168 __le32 val;
169 __get_user(val, (unsigned int __user *) buf); 169 __get_user(val, (__le32 __user *) buf);
170 pci_user_write_config_dword(dev, pos, le32_to_cpu(val)); 170 pci_user_write_config_dword(dev, pos, le32_to_cpu(val));
171 buf += 4; 171 buf += 4;
172 pos += 4; 172 pos += 4;
@@ -174,8 +174,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
174 } 174 }
175 175
176 if (cnt >= 2) { 176 if (cnt >= 2) {
177 unsigned short val; 177 __le16 val;
178 __get_user(val, (unsigned short __user *) buf); 178 __get_user(val, (__le16 __user *) buf);
179 pci_user_write_config_word(dev, pos, le16_to_cpu(val)); 179 pci_user_write_config_word(dev, pos, le16_to_cpu(val));
180 buf += 2; 180 buf += 2;
181 pos += 2; 181 pos += 2;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c880dd0bbfb5..0b60ed884d98 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,14 @@
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include "pci.h" 25#include "pci.h"
26 26
27int isa_dma_bridge_buggy;
28EXPORT_SYMBOL(isa_dma_bridge_buggy);
29int pci_pci_problems;
30EXPORT_SYMBOL(pci_pci_problems);
31int pcie_mch_quirk;
32EXPORT_SYMBOL(pcie_mch_quirk);
33
34#ifdef CONFIG_PCI_QUIRKS
27/* The Mellanox Tavor device gives false positive parity errors 35/* The Mellanox Tavor device gives false positive parity errors
28 * Mark this device with a broken_parity_status, to allow 36 * Mark this device with a broken_parity_status, to allow
29 * PCI scanning code to "skip" this now blacklisted device. 37 * PCI scanning code to "skip" this now blacklisted device.
@@ -35,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
35DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); 43DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
36DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); 44DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
37 45
46/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
47int forbid_dac __read_mostly;
48EXPORT_SYMBOL(forbid_dac);
49
50static __devinit void via_no_dac(struct pci_dev *dev)
51{
52 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
53 dev_info(&dev->dev,
54 "VIA PCI bridge detected. Disabling DAC.\n");
55 forbid_dac = 1;
56 }
57}
58DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
59
38/* Deal with broken BIOS'es that neglect to enable passive release, 60/* Deal with broken BIOS'es that neglect to enable passive release,
39 which can cause problems in combination with the 82441FX/PPro MTRRs */ 61 which can cause problems in combination with the 82441FX/PPro MTRRs */
40static void quirk_passive_release(struct pci_dev *dev) 62static void quirk_passive_release(struct pci_dev *dev)
@@ -62,8 +84,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
62 84
63 This appears to be BIOS not version dependent. So presumably there is a 85 This appears to be BIOS not version dependent. So presumably there is a
64 chipset level fix */ 86 chipset level fix */
65int isa_dma_bridge_buggy;
66EXPORT_SYMBOL(isa_dma_bridge_buggy);
67 87
68static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) 88static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
69{ 89{
@@ -84,9 +104,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_d
84DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); 104DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
85DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 105DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
86 106
87int pci_pci_problems;
88EXPORT_SYMBOL(pci_pci_problems);
89
90/* 107/*
91 * Chipsets where PCI->PCI transfers vanish or hang 108 * Chipsets where PCI->PCI transfers vanish or hang
92 */ 109 */
@@ -902,6 +919,19 @@ static void __init quirk_ide_samemode(struct pci_dev *pdev)
902} 919}
903DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); 920DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
904 921
922/*
923 * Some ATA devices break if put into D3
924 */
925
926static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
927{
928 /* Quirk the legacy ATA devices only. The AHCI ones are ok */
929 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
930 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
931}
932DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
933DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
934
905/* This was originally an Alpha specific thing, but it really fits here. 935/* This was originally an Alpha specific thing, but it really fits here.
906 * The i82375 PCI/EISA bridge appears as non-classified. Fix that. 936 * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
907 */ 937 */
@@ -1328,9 +1358,6 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev)
1328DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); 1358DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
1329#endif 1359#endif
1330 1360
1331int pcie_mch_quirk;
1332EXPORT_SYMBOL(pcie_mch_quirk);
1333
1334static void __devinit quirk_pcie_mch(struct pci_dev *pdev) 1361static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
1335{ 1362{
1336 pcie_mch_quirk = 1; 1363 pcie_mch_quirk = 1;
@@ -1670,85 +1697,6 @@ static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
1670} 1697}
1671DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); 1698DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
1672 1699
1673static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
1674{
1675 while (f < end) {
1676 if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
1677 (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
1678#ifdef DEBUG
1679 dev_dbg(&dev->dev, "calling ");
1680 print_fn_descriptor_symbol("%s\n", f->hook);
1681#endif
1682 f->hook(dev);
1683 }
1684 f++;
1685 }
1686}
1687
1688extern struct pci_fixup __start_pci_fixups_early[];
1689extern struct pci_fixup __end_pci_fixups_early[];
1690extern struct pci_fixup __start_pci_fixups_header[];
1691extern struct pci_fixup __end_pci_fixups_header[];
1692extern struct pci_fixup __start_pci_fixups_final[];
1693extern struct pci_fixup __end_pci_fixups_final[];
1694extern struct pci_fixup __start_pci_fixups_enable[];
1695extern struct pci_fixup __end_pci_fixups_enable[];
1696extern struct pci_fixup __start_pci_fixups_resume[];
1697extern struct pci_fixup __end_pci_fixups_resume[];
1698extern struct pci_fixup __start_pci_fixups_resume_early[];
1699extern struct pci_fixup __end_pci_fixups_resume_early[];
1700extern struct pci_fixup __start_pci_fixups_suspend[];
1701extern struct pci_fixup __end_pci_fixups_suspend[];
1702
1703
1704void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1705{
1706 struct pci_fixup *start, *end;
1707
1708 switch(pass) {
1709 case pci_fixup_early:
1710 start = __start_pci_fixups_early;
1711 end = __end_pci_fixups_early;
1712 break;
1713
1714 case pci_fixup_header:
1715 start = __start_pci_fixups_header;
1716 end = __end_pci_fixups_header;
1717 break;
1718
1719 case pci_fixup_final:
1720 start = __start_pci_fixups_final;
1721 end = __end_pci_fixups_final;
1722 break;
1723
1724 case pci_fixup_enable:
1725 start = __start_pci_fixups_enable;
1726 end = __end_pci_fixups_enable;
1727 break;
1728
1729 case pci_fixup_resume:
1730 start = __start_pci_fixups_resume;
1731 end = __end_pci_fixups_resume;
1732 break;
1733
1734 case pci_fixup_resume_early:
1735 start = __start_pci_fixups_resume_early;
1736 end = __end_pci_fixups_resume_early;
1737 break;
1738
1739 case pci_fixup_suspend:
1740 start = __start_pci_fixups_suspend;
1741 end = __end_pci_fixups_suspend;
1742 break;
1743
1744 default:
1745 /* stupid compiler warning, you would think with an enum... */
1746 return;
1747 }
1748 pci_do_fixups(dev, start, end);
1749}
1750EXPORT_SYMBOL(pci_fixup_device);
1751
1752/* Enable 1k I/O space granularity on the Intel P64H2 */ 1700/* Enable 1k I/O space granularity on the Intel P64H2 */
1753static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) 1701static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
1754{ 1702{
@@ -1871,9 +1819,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c
1871 */ 1819 */
1872static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev) 1820static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
1873{ 1821{
1874 /* Only disable the VPD capability for 5706, 5708, and 5709 rev. A */ 1822 /*
1823 * Only disable the VPD capability for 5706, 5706S, 5708,
1824 * 5708S and 5709 rev. A
1825 */
1875 if ((dev->device == PCI_DEVICE_ID_NX2_5706) || 1826 if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
1827 (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
1876 (dev->device == PCI_DEVICE_ID_NX2_5708) || 1828 (dev->device == PCI_DEVICE_ID_NX2_5708) ||
1829 (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
1877 ((dev->device == PCI_DEVICE_ID_NX2_5709) && 1830 ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
1878 (dev->revision & 0xf0) == 0x0)) { 1831 (dev->revision & 0xf0) == 0x0)) {
1879 if (dev->vpd) 1832 if (dev->vpd)
@@ -2117,3 +2070,82 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2117 quirk_msi_intx_disable_bug); 2070 quirk_msi_intx_disable_bug);
2118 2071
2119#endif /* CONFIG_PCI_MSI */ 2072#endif /* CONFIG_PCI_MSI */
2073
2074static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
2075{
2076 while (f < end) {
2077 if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
2078 (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
2079 dev_dbg(&dev->dev, "calling %pF\n", f->hook);
2080 f->hook(dev);
2081 }
2082 f++;
2083 }
2084}
2085
2086extern struct pci_fixup __start_pci_fixups_early[];
2087extern struct pci_fixup __end_pci_fixups_early[];
2088extern struct pci_fixup __start_pci_fixups_header[];
2089extern struct pci_fixup __end_pci_fixups_header[];
2090extern struct pci_fixup __start_pci_fixups_final[];
2091extern struct pci_fixup __end_pci_fixups_final[];
2092extern struct pci_fixup __start_pci_fixups_enable[];
2093extern struct pci_fixup __end_pci_fixups_enable[];
2094extern struct pci_fixup __start_pci_fixups_resume[];
2095extern struct pci_fixup __end_pci_fixups_resume[];
2096extern struct pci_fixup __start_pci_fixups_resume_early[];
2097extern struct pci_fixup __end_pci_fixups_resume_early[];
2098extern struct pci_fixup __start_pci_fixups_suspend[];
2099extern struct pci_fixup __end_pci_fixups_suspend[];
2100
2101
2102void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
2103{
2104 struct pci_fixup *start, *end;
2105
2106 switch(pass) {
2107 case pci_fixup_early:
2108 start = __start_pci_fixups_early;
2109 end = __end_pci_fixups_early;
2110 break;
2111
2112 case pci_fixup_header:
2113 start = __start_pci_fixups_header;
2114 end = __end_pci_fixups_header;
2115 break;
2116
2117 case pci_fixup_final:
2118 start = __start_pci_fixups_final;
2119 end = __end_pci_fixups_final;
2120 break;
2121
2122 case pci_fixup_enable:
2123 start = __start_pci_fixups_enable;
2124 end = __end_pci_fixups_enable;
2125 break;
2126
2127 case pci_fixup_resume:
2128 start = __start_pci_fixups_resume;
2129 end = __end_pci_fixups_resume;
2130 break;
2131
2132 case pci_fixup_resume_early:
2133 start = __start_pci_fixups_resume_early;
2134 end = __end_pci_fixups_resume_early;
2135 break;
2136
2137 case pci_fixup_suspend:
2138 start = __start_pci_fixups_suspend;
2139 end = __end_pci_fixups_suspend;
2140 break;
2141
2142 default:
2143 /* stupid compiler warning, you would think with an enum... */
2144 return;
2145 }
2146 pci_do_fixups(dev, start, end);
2147}
2148#else
2149void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
2150#endif
2151EXPORT_SYMBOL(pci_fixup_device);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index bdc2a44d68e1..042e08924421 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -73,6 +73,7 @@ void pci_remove_bus(struct pci_bus *pci_bus)
73 up_write(&pci_bus_sem); 73 up_write(&pci_bus_sem);
74 pci_remove_legacy_files(pci_bus); 74 pci_remove_legacy_files(pci_bus);
75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); 75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
76 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
76 device_unregister(&pci_bus->dev); 77 device_unregister(&pci_bus->dev);
77} 78}
78EXPORT_SYMBOL(pci_remove_bus); 79EXPORT_SYMBOL(pci_remove_bus);
@@ -114,13 +115,9 @@ void pci_remove_behind_bridge(struct pci_dev *dev)
114{ 115{
115 struct list_head *l, *n; 116 struct list_head *l, *n;
116 117
117 if (dev->subordinate) { 118 if (dev->subordinate)
118 list_for_each_safe(l, n, &dev->subordinate->devices) { 119 list_for_each_safe(l, n, &dev->subordinate->devices)
119 struct pci_dev *dev = pci_dev_b(l); 120 pci_remove_bus_device(pci_dev_b(l));
120
121 pci_remove_bus_device(dev);
122 }
123 }
124} 121}
125 122
126static void pci_stop_bus_devices(struct pci_bus *bus) 123static void pci_stop_bus_devices(struct pci_bus *bus)
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index bd5c0e031398..1f5f6143f35c 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -21,7 +21,7 @@
21 * between the ROM and other resources, so enabling it may disable access 21 * between the ROM and other resources, so enabling it may disable access
22 * to MMIO registers or other card memory. 22 * to MMIO registers or other card memory.
23 */ 23 */
24static int pci_enable_rom(struct pci_dev *pdev) 24int pci_enable_rom(struct pci_dev *pdev)
25{ 25{
26 struct resource *res = pdev->resource + PCI_ROM_RESOURCE; 26 struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
27 struct pci_bus_region region; 27 struct pci_bus_region region;
@@ -45,7 +45,7 @@ static int pci_enable_rom(struct pci_dev *pdev)
45 * Disable ROM decoding on a PCI device by turning off the last bit in the 45 * Disable ROM decoding on a PCI device by turning off the last bit in the
46 * ROM BAR. 46 * ROM BAR.
47 */ 47 */
48static void pci_disable_rom(struct pci_dev *pdev) 48void pci_disable_rom(struct pci_dev *pdev)
49{ 49{
50 u32 rom_addr; 50 u32 rom_addr;
51 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); 51 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
@@ -260,3 +260,5 @@ void pci_cleanup_rom(struct pci_dev *pdev)
260 260
261EXPORT_SYMBOL(pci_map_rom); 261EXPORT_SYMBOL(pci_map_rom);
262EXPORT_SYMBOL(pci_unmap_rom); 262EXPORT_SYMBOL(pci_unmap_rom);
263EXPORT_SYMBOL_GPL(pci_enable_rom);
264EXPORT_SYMBOL_GPL(pci_disable_rom);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 217814fef4ef..5af8bd538149 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -162,10 +162,11 @@ EXPORT_SYMBOL(pci_find_slot);
162 * time. 162 * time.
163 */ 163 */
164struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, 164struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
165 const struct pci_dev *from) 165 struct pci_dev *from)
166{ 166{
167 struct pci_dev *pdev; 167 struct pci_dev *pdev;
168 168
169 pci_dev_get(from);
169 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); 170 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
170 pci_dev_put(pdev); 171 pci_dev_put(pdev);
171 return pdev; 172 return pdev;
@@ -263,23 +264,21 @@ static int match_pci_dev_by_id(struct device *dev, void *data)
263 * this file. 264 * this file.
264 */ 265 */
265static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, 266static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
266 const struct pci_dev *from) 267 struct pci_dev *from)
267{ 268{
268 struct device *dev; 269 struct device *dev;
269 struct device *dev_start = NULL; 270 struct device *dev_start = NULL;
270 struct pci_dev *pdev = NULL; 271 struct pci_dev *pdev = NULL;
271 272
272 WARN_ON(in_interrupt()); 273 WARN_ON(in_interrupt());
273 if (from) { 274 if (from)
274 /* FIXME 275 dev_start = &from->dev;
275 * take the cast off, when bus_find_device is made const.
276 */
277 dev_start = (struct device *)&from->dev;
278 }
279 dev = bus_find_device(&pci_bus_type, dev_start, (void *)id, 276 dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
280 match_pci_dev_by_id); 277 match_pci_dev_by_id);
281 if (dev) 278 if (dev)
282 pdev = to_pci_dev(dev); 279 pdev = to_pci_dev(dev);
280 if (from)
281 pci_dev_put(from);
283 return pdev; 282 return pdev;
284} 283}
285 284
@@ -301,7 +300,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
301 */ 300 */
302struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, 301struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
303 unsigned int ss_vendor, unsigned int ss_device, 302 unsigned int ss_vendor, unsigned int ss_device,
304 const struct pci_dev *from) 303 struct pci_dev *from)
305{ 304{
306 struct pci_dev *pdev; 305 struct pci_dev *pdev;
307 struct pci_device_id *id; 306 struct pci_device_id *id;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 827c0a520e2b..ea979f2bc6db 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -299,7 +299,7 @@ static void pbus_size_io(struct pci_bus *bus)
299 299
300 if (r->parent || !(r->flags & IORESOURCE_IO)) 300 if (r->parent || !(r->flags & IORESOURCE_IO))
301 continue; 301 continue;
302 r_size = r->end - r->start + 1; 302 r_size = resource_size(r);
303 303
304 if (r_size < 0x400) 304 if (r_size < 0x400)
305 /* Might be re-aligned for ISA */ 305 /* Might be re-aligned for ISA */
@@ -350,15 +350,13 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
350 350
351 if (r->parent || (r->flags & mask) != type) 351 if (r->parent || (r->flags & mask) != type)
352 continue; 352 continue;
353 r_size = r->end - r->start + 1; 353 r_size = resource_size(r);
354 /* For bridges size != alignment */ 354 /* For bridges size != alignment */
355 align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; 355 align = resource_alignment(r);
356 order = __ffs(align) - 20; 356 order = __ffs(align) - 20;
357 if (order > 11) { 357 if (order > 11) {
358 dev_warn(&dev->dev, "BAR %d too large: " 358 dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
359 "%#016llx-%#016llx\n", i, 359 "%pR\n", i, (unsigned long long)align, r);
360 (unsigned long long)r->start,
361 (unsigned long long)r->end);
362 r->flags = 0; 360 r->flags = 0;
363 continue; 361 continue;
364 } 362 }
@@ -377,11 +375,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
377 align = 0; 375 align = 0;
378 min_align = 0; 376 min_align = 0;
379 for (order = 0; order <= max_order; order++) { 377 for (order = 0; order <= max_order; order++) {
380#ifdef CONFIG_RESOURCES_64BIT 378 resource_size_t align1 = 1;
381 resource_size_t align1 = 1ULL << (order + 20); 379
382#else 380 align1 <<= (order + 20);
383 resource_size_t align1 = 1U << (order + 20); 381
384#endif
385 if (!align) 382 if (!align)
386 min_align = align1; 383 min_align = align1;
387 else if (ALIGN(align + min_align, min_align) < align1) 384 else if (ALIGN(align + min_align, min_align) < align1)
@@ -530,6 +527,38 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus)
530} 527}
531EXPORT_SYMBOL(pci_bus_assign_resources); 528EXPORT_SYMBOL(pci_bus_assign_resources);
532 529
530static void pci_bus_dump_res(struct pci_bus *bus)
531{
532 int i;
533
534 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
535 struct resource *res = bus->resource[i];
536 if (!res)
537 continue;
538
539 printk(KERN_INFO "bus: %02x index %x %s: %pR\n",
540 bus->number, i,
541 (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res);
542 }
543}
544
545static void pci_bus_dump_resources(struct pci_bus *bus)
546{
547 struct pci_bus *b;
548 struct pci_dev *dev;
549
550
551 pci_bus_dump_res(bus);
552
553 list_for_each_entry(dev, &bus->devices, bus_list) {
554 b = dev->subordinate;
555 if (!b)
556 continue;
557
558 pci_bus_dump_resources(b);
559 }
560}
561
533void __init 562void __init
534pci_assign_unassigned_resources(void) 563pci_assign_unassigned_resources(void)
535{ 564{
@@ -545,4 +574,9 @@ pci_assign_unassigned_resources(void)
545 pci_bus_assign_resources(bus); 574 pci_bus_assign_resources(bus);
546 pci_enable_bridges(bus); 575 pci_enable_bridges(bus);
547 } 576 }
577
578 /* dump the resource on buses */
579 list_for_each_entry(bus, &pci_root_buses, node) {
580 pci_bus_dump_resources(bus);
581 }
548} 582}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1a5fc83c71b3..2dbd96cce2d8 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -49,10 +49,8 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
49 49
50 pcibios_resource_to_bus(dev, &region, res); 50 pcibios_resource_to_bus(dev, &region, res);
51 51
52 dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] " 52 dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
53 "flags %#lx\n", resno, 53 "flags %#lx\n", resno, res,
54 (unsigned long long)res->start,
55 (unsigned long long)res->end,
56 (unsigned long long)region.start, 54 (unsigned long long)region.start,
57 (unsigned long long)region.end, 55 (unsigned long long)region.end,
58 (unsigned long)res->flags); 56 (unsigned long)res->flags);
@@ -114,13 +112,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
114 err = insert_resource(root, res); 112 err = insert_resource(root, res);
115 113
116 if (err) { 114 if (err) {
117 dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n", 115 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
118 resource, 116 resource,
119 root ? "address space collision on" : 117 root ? "address space collision on" :
120 "no parent found for", 118 "no parent found for",
121 dtype, 119 dtype, res);
122 (unsigned long long)res->start,
123 (unsigned long long)res->end);
124 } 120 }
125 121
126 return err; 122 return err;
@@ -133,15 +129,14 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
133 resource_size_t size, min, align; 129 resource_size_t size, min, align;
134 int ret; 130 int ret;
135 131
136 size = res->end - res->start + 1; 132 size = resource_size(res);
137 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 133 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
138 134
139 align = resource_alignment(res); 135 align = resource_alignment(res);
140 if (!align) { 136 if (!align) {
141 dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " 137 dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
142 "alignment) [%#llx-%#llx] flags %#lx\n", 138 "alignment) %pR flags %#lx\n",
143 resno, (unsigned long long)res->start, 139 resno, res, res->flags);
144 (unsigned long long)res->end, res->flags);
145 return -EINVAL; 140 return -EINVAL;
146 } 141 }
147 142
@@ -162,11 +157,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
162 } 157 }
163 158
164 if (ret) { 159 if (ret) {
165 dev_err(&dev->dev, "BAR %d: can't allocate %s resource " 160 dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
166 "[%#llx-%#llx]\n", resno, 161 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
167 res->flags & IORESOURCE_IO ? "I/O" : "mem",
168 (unsigned long long)res->start,
169 (unsigned long long)res->end);
170 } else { 162 } else {
171 res->flags &= ~IORESOURCE_STARTALIGN; 163 res->flags &= ~IORESOURCE_STARTALIGN;
172 if (resno < PCI_BRIDGE_RESOURCES) 164 if (resno < PCI_BRIDGE_RESOURCES)
@@ -202,11 +194,8 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
202 } 194 }
203 195
204 if (ret) { 196 if (ret) {
205 dev_err(&dev->dev, "BAR %d: can't allocate %s resource " 197 dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
206 "[%#llx-%#llx\n]", resno, 198 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
207 res->flags & IORESOURCE_IO ? "I/O" : "mem",
208 (unsigned long long)res->start,
209 (unsigned long long)res->end);
210 } else if (resno < PCI_BRIDGE_RESOURCES) { 199 } else if (resno < PCI_BRIDGE_RESOURCES) {
211 pci_update_resource(dev, res, resno); 200 pci_update_resource(dev, res, resno);
212 } 201 }
@@ -237,9 +226,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
237 r_align = resource_alignment(r); 226 r_align = resource_alignment(r);
238 if (!r_align) { 227 if (!r_align) {
239 dev_warn(&dev->dev, "BAR %d: bogus alignment " 228 dev_warn(&dev->dev, "BAR %d: bogus alignment "
240 "[%#llx-%#llx] flags %#lx\n", 229 "%pR flags %#lx\n",
241 i, (unsigned long long)r->start, 230 i, r, r->flags);
242 (unsigned long long)r->end, r->flags);
243 continue; 231 continue;
244 } 232 }
245 for (list = head; ; list = list->next) { 233 for (list = head; ; list = list->next) {
@@ -287,9 +275,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
287 275
288 if (!r->parent) { 276 if (!r->parent) {
289 dev_err(&dev->dev, "device not available because of " 277 dev_err(&dev->dev, "device not available because of "
290 "BAR %d [%#llx-%#llx] collisions\n", i, 278 "BAR %d %pR collisions\n", i, r);
291 (unsigned long long) r->start,
292 (unsigned long long) r->end);
293 return -EINVAL; 279 return -EINVAL;
294 } 280 }
295 281
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 7e5b85cbd948..4dd1c3e157ae 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,11 +49,16 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
49 49
50static void pci_slot_release(struct kobject *kobj) 50static void pci_slot_release(struct kobject *kobj)
51{ 51{
52 struct pci_dev *dev;
52 struct pci_slot *slot = to_pci_slot(kobj); 53 struct pci_slot *slot = to_pci_slot(kobj);
53 54
54 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, 55 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
55 slot->bus->number, slot->number); 56 slot->bus->number, slot->number);
56 57
58 list_for_each_entry(dev, &slot->bus->devices, bus_list)
59 if (PCI_SLOT(dev->devfn) == slot->number)
60 dev->slot = NULL;
61
57 list_del(&slot->list); 62 list_del(&slot->list);
58 63
59 kfree(slot); 64 kfree(slot);
@@ -73,18 +78,100 @@ static struct kobj_type pci_slot_ktype = {
73 .default_attrs = pci_slot_default_attrs, 78 .default_attrs = pci_slot_default_attrs,
74}; 79};
75 80
81static char *make_slot_name(const char *name)
82{
83 char *new_name;
84 int len, max, dup;
85
86 new_name = kstrdup(name, GFP_KERNEL);
87 if (!new_name)
88 return NULL;
89
90 /*
91 * Make sure we hit the realloc case the first time through the
92 * loop. 'len' will be strlen(name) + 3 at that point which is
93 * enough space for "name-X" and the trailing NUL.
94 */
95 len = strlen(name) + 2;
96 max = 1;
97 dup = 1;
98
99 for (;;) {
100 struct kobject *dup_slot;
101 dup_slot = kset_find_obj(pci_slots_kset, new_name);
102 if (!dup_slot)
103 break;
104 kobject_put(dup_slot);
105 if (dup == max) {
106 len++;
107 max *= 10;
108 kfree(new_name);
109 new_name = kmalloc(len, GFP_KERNEL);
110 if (!new_name)
111 break;
112 }
113 sprintf(new_name, "%s-%d", name, dup++);
114 }
115
116 return new_name;
117}
118
119static int rename_slot(struct pci_slot *slot, const char *name)
120{
121 int result = 0;
122 char *slot_name;
123
124 if (strcmp(pci_slot_name(slot), name) == 0)
125 return result;
126
127 slot_name = make_slot_name(name);
128 if (!slot_name)
129 return -ENOMEM;
130
131 result = kobject_rename(&slot->kobj, slot_name);
132 kfree(slot_name);
133
134 return result;
135}
136
137static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
138{
139 struct pci_slot *slot;
140 /*
141 * We already hold pci_bus_sem so don't worry
142 */
143 list_for_each_entry(slot, &parent->slots, list)
144 if (slot->number == slot_nr) {
145 kobject_get(&slot->kobj);
146 return slot;
147 }
148
149 return NULL;
150}
151
76/** 152/**
77 * pci_create_slot - create or increment refcount for physical PCI slot 153 * pci_create_slot - create or increment refcount for physical PCI slot
78 * @parent: struct pci_bus of parent bridge 154 * @parent: struct pci_bus of parent bridge
79 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder 155 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
80 * @name: user visible string presented in /sys/bus/pci/slots/<name> 156 * @name: user visible string presented in /sys/bus/pci/slots/<name>
157 * @hotplug: set if caller is hotplug driver, NULL otherwise
81 * 158 *
82 * PCI slots have first class attributes such as address, speed, width, 159 * PCI slots have first class attributes such as address, speed, width,
83 * and a &struct pci_slot is used to manage them. This interface will 160 * and a &struct pci_slot is used to manage them. This interface will
84 * either return a new &struct pci_slot to the caller, or if the pci_slot 161 * either return a new &struct pci_slot to the caller, or if the pci_slot
85 * already exists, its refcount will be incremented. 162 * already exists, its refcount will be incremented.
86 * 163 *
87 * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple. 164 * Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
165 *
166 * There are known platforms with broken firmware that assign the same
167 * name to multiple slots. Workaround these broken platforms by renaming
168 * the slots on behalf of the caller. If firmware assigns name N to
169 * multiple slots:
170 *
171 * The first slot is assigned N
172 * The second slot is assigned N-1
173 * The third slot is assigned N-2
174 * etc.
88 * 175 *
89 * Placeholder slots: 176 * Placeholder slots:
90 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify 177 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
@@ -93,71 +180,82 @@ static struct kobj_type pci_slot_ktype = {
93 * the slot. In this scenario, the caller may pass -1 for @slot_nr. 180 * the slot. In this scenario, the caller may pass -1 for @slot_nr.
94 * 181 *
95 * The following semantics are imposed when the caller passes @slot_nr == 182 * The following semantics are imposed when the caller passes @slot_nr ==
96 * -1. First, the check for existing %struct pci_slot is skipped, as the 183 * -1. First, we no longer check for an existing %struct pci_slot, as there
97 * caller may know about several unpopulated slots on a given %struct 184 * may be many slots with @slot_nr of -1. The other change in semantics is
98 * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
99 * these slots is then determined by the @name parameter. We expect
100 * kobject_init_and_add() to warn us if the caller attempts to create
101 * multiple slots with the same name. The other change in semantics is
102 * user-visible, which is the 'address' parameter presented in sysfs will 185 * user-visible, which is the 'address' parameter presented in sysfs will
103 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the 186 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
104 * %struct pci_bus and bb is the bus number. In other words, the devfn of 187 * %struct pci_bus and bb is the bus number. In other words, the devfn of
105 * the 'placeholder' slot will not be displayed. 188 * the 'placeholder' slot will not be displayed.
106 */ 189 */
107
108struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 190struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
109 const char *name) 191 const char *name,
192 struct hotplug_slot *hotplug)
110{ 193{
194 struct pci_dev *dev;
111 struct pci_slot *slot; 195 struct pci_slot *slot;
112 int err; 196 int err = 0;
197 char *slot_name = NULL;
113 198
114 down_write(&pci_bus_sem); 199 down_write(&pci_bus_sem);
115 200
116 if (slot_nr == -1) 201 if (slot_nr == -1)
117 goto placeholder; 202 goto placeholder;
118 203
119 /* If we've already created this slot, bump refcount and return. */ 204 /*
120 list_for_each_entry(slot, &parent->slots, list) { 205 * Hotplug drivers are allowed to rename an existing slot,
121 if (slot->number == slot_nr) { 206 * but only if not already claimed.
122 kobject_get(&slot->kobj); 207 */
123 pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n", 208 slot = get_slot(parent, slot_nr);
124 __func__, 209 if (slot) {
125 atomic_read(&slot->kobj.kref.refcount), 210 if (hotplug) {
126 pci_domain_nr(parent), parent->number, 211 if ((err = slot->hotplug ? -EBUSY : 0)
127 slot_nr); 212 || (err = rename_slot(slot, name))) {
128 goto out; 213 kobject_put(&slot->kobj);
214 slot = NULL;
215 goto err;
216 }
129 } 217 }
218 goto out;
130 } 219 }
131 220
132placeholder: 221placeholder:
133 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 222 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
134 if (!slot) { 223 if (!slot) {
135 slot = ERR_PTR(-ENOMEM); 224 err = -ENOMEM;
136 goto out; 225 goto err;
137 } 226 }
138 227
139 slot->bus = parent; 228 slot->bus = parent;
140 slot->number = slot_nr; 229 slot->number = slot_nr;
141 230
142 slot->kobj.kset = pci_slots_kset; 231 slot->kobj.kset = pci_slots_kset;
143 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, 232
144 "%s", name); 233 slot_name = make_slot_name(name);
145 if (err) { 234 if (!slot_name) {
146 printk(KERN_ERR "Unable to register kobject %s\n", name); 235 err = -ENOMEM;
147 goto err; 236 goto err;
148 } 237 }
149 238
239 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
240 "%s", slot_name);
241 if (err)
242 goto err;
243
150 INIT_LIST_HEAD(&slot->list); 244 INIT_LIST_HEAD(&slot->list);
151 list_add(&slot->list, &parent->slots); 245 list_add(&slot->list, &parent->slots);
152 246
247 list_for_each_entry(dev, &parent->devices, bus_list)
248 if (PCI_SLOT(dev->devfn) == slot_nr)
249 dev->slot = slot;
250
153 /* Don't care if debug printk has a -1 for slot_nr */ 251 /* Don't care if debug printk has a -1 for slot_nr */
154 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
155 __func__, pci_domain_nr(parent), parent->number, slot_nr); 253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
156 254
157 out: 255out:
158 up_write(&pci_bus_sem); 256 up_write(&pci_bus_sem);
159 return slot; 257 return slot;
160 err: 258err:
161 kfree(slot); 259 kfree(slot);
162 slot = ERR_PTR(err); 260 slot = ERR_PTR(err);
163 goto out; 261 goto out;
@@ -165,7 +263,7 @@ placeholder:
165EXPORT_SYMBOL_GPL(pci_create_slot); 263EXPORT_SYMBOL_GPL(pci_create_slot);
166 264
167/** 265/**
168 * pci_update_slot_number - update %struct pci_slot -> number 266 * pci_renumber_slot - update %struct pci_slot -> number
169 * @slot - %struct pci_slot to update 267 * @slot - %struct pci_slot to update
170 * @slot_nr - new number for slot 268 * @slot_nr - new number for slot
171 * 269 *
@@ -173,27 +271,22 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
173 * created a placeholder slot in pci_create_slot() by passing a -1 as 271 * created a placeholder slot in pci_create_slot() by passing a -1 as
174 * slot_nr, to update their %struct pci_slot with the correct @slot_nr. 272 * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
175 */ 273 */
176 274void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
177void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
178{ 275{
179 int name_count = 0;
180 struct pci_slot *tmp; 276 struct pci_slot *tmp;
181 277
182 down_write(&pci_bus_sem); 278 down_write(&pci_bus_sem);
183 279
184 list_for_each_entry(tmp, &slot->bus->slots, list) { 280 list_for_each_entry(tmp, &slot->bus->slots, list) {
185 WARN_ON(tmp->number == slot_nr); 281 WARN_ON(tmp->number == slot_nr);
186 if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj))) 282 goto out;
187 name_count++;
188 } 283 }
189 284
190 if (name_count > 1)
191 printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
192
193 slot->number = slot_nr; 285 slot->number = slot_nr;
286out:
194 up_write(&pci_bus_sem); 287 up_write(&pci_bus_sem);
195} 288}
196EXPORT_SYMBOL_GPL(pci_update_slot_number); 289EXPORT_SYMBOL_GPL(pci_renumber_slot);
197 290
198/** 291/**
199 * pci_destroy_slot - decrement refcount for physical PCI slot 292 * pci_destroy_slot - decrement refcount for physical PCI slot
@@ -203,7 +296,6 @@ EXPORT_SYMBOL_GPL(pci_update_slot_number);
203 * just call kobject_put on its kobj and let our release methods do the 296 * just call kobject_put on its kobj and let our release methods do the
204 * rest. 297 * rest.
205 */ 298 */
206
207void pci_destroy_slot(struct pci_slot *slot) 299void pci_destroy_slot(struct pci_slot *slot)
208{ 300{
209 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 301 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,