path: root/drivers/pci
author		Linus Torvalds <torvalds@linux-foundation.org>	2009-04-01 12:47:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-01 12:47:12 -0400
commit		e76e5b2c663ac74ae6a542ac20795c625e36a5cd (patch)
tree		2e7271be1f3a26832f4b121839fc4044fbbf27a6 /drivers/pci
parent		32527bc0e4b4fa7711ad1c923cf64ae72a7ffd9d (diff)
parent		eeafda70bf2807544e96fa4e52b2433cd470ff46 (diff)
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (88 commits)
  PCI: fix HT MSI mapping fix
  PCI: don't enable too much HT MSI mapping
  x86/PCI: make pci=lastbus=255 work when acpi is on
  PCI: save and restore PCIe 2.0 registers
  PCI: update fakephp for bus_id removal
  PCI: fix kernel oops on bridge removal
  PCI: fix conflict between SR-IOV and config space sizing
  powerpc/PCI: include pci.h in powerpc MSI implementation
  PCI Hotplug: schedule fakephp for feature removal
  PCI Hotplug: rename legacy_fakephp to fakephp
  PCI Hotplug: restore fakephp interface with complete reimplementation
  PCI: Introduce /sys/bus/pci/devices/.../rescan
  PCI: Introduce /sys/bus/pci/devices/.../remove
  PCI: Introduce /sys/bus/pci/rescan
  PCI: Introduce pci_rescan_bus()
  PCI: do not enable bridges more than once
  PCI: do not initialize bridges more than once
  PCI: always scan child buses
  PCI: pci_scan_slot() returns newly found devices
  PCI: don't scan existing devices
  ...

Fix trivial append-only conflict in Documentation/feature-removal-schedule.txt
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/Kconfig                     |  10
-rw-r--r--	drivers/pci/Makefile                    |   2
-rw-r--r--	drivers/pci/bus.c                       |   8
-rw-r--r--	drivers/pci/hotplug/acpi_pcihp.c        |  58
-rw-r--r--	drivers/pci/hotplug/fakephp.c           | 444
-rw-r--r--	drivers/pci/hotplug/pciehp.h            |  13
-rw-r--r--	drivers/pci/hotplug/pciehp_acpi.c       |  21
-rw-r--r--	drivers/pci/hotplug/pciehp_core.c       |  18
-rw-r--r--	drivers/pci/hotplug/pciehp_hpc.c        |  34
-rw-r--r--	drivers/pci/hotplug/shpchp.h            |  10
-rw-r--r--	drivers/pci/hotplug/shpchp_pci.c        |   2
-rw-r--r--	drivers/pci/intel-iommu.c               |   2
-rw-r--r--	drivers/pci/iov.c                       | 680
-rw-r--r--	drivers/pci/msi.c                       | 426
-rw-r--r--	drivers/pci/msi.h                       |   6
-rw-r--r--	drivers/pci/pci-acpi.c                  | 215
-rw-r--r--	drivers/pci/pci-driver.c                |  81
-rw-r--r--	drivers/pci/pci-sysfs.c                 | 124
-rw-r--r--	drivers/pci/pci.c                       | 193
-rw-r--r--	drivers/pci/pci.h                       |  65
-rw-r--r--	drivers/pci/pcie/aer/aerdrv.c           |  28
-rw-r--r--	drivers/pci/pcie/aer/aerdrv_acpi.c      |   2
-rw-r--r--	drivers/pci/pcie/aer/aerdrv_core.c      |  10
-rw-r--r--	drivers/pci/pcie/portdrv.h              |  14
-rw-r--r--	drivers/pci/pcie/portdrv_bus.c          |  18
-rw-r--r--	drivers/pci/pcie/portdrv_core.c         | 379
-rw-r--r--	drivers/pci/pcie/portdrv_pci.c          |  50
-rw-r--r--	drivers/pci/probe.c                     | 210
-rw-r--r--	drivers/pci/quirks.c                    | 221
-rw-r--r--	drivers/pci/remove.c                    |   4
-rw-r--r--	drivers/pci/search.c                    |   2
-rw-r--r--	drivers/pci/setup-bus.c                 |   7
-rw-r--r--	drivers/pci/setup-res.c                 |  15
-rw-r--r--	drivers/pci/slot.c                      |  18
34 files changed, 2149 insertions, 1241 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2a4501dd2515..fdc864f9cf23 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -59,3 +59,13 @@ config HT_IRQ
 	   This allows native hypertransport devices to use interrupts.
 
 	   If unsure say Y.
+
+config PCI_IOV
+	bool "PCI IOV support"
+	depends on PCI
+	help
+	  I/O Virtualization is a PCI feature supported by some devices
+	  which allows them to create virtual devices which share their
+	  physical resources.
+
+	  If unsure, say N.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d07ce24f6a8..ba6af162fd39 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
 
 obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
 
+obj-$(CONFIG_PCI_IOV) += iov.o
+
 #
 # Some architectures use the generic PCI setup functions
 #
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 52b54f053be0..68f91a252595 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -133,7 +133,7 @@ int pci_bus_add_child(struct pci_bus *bus)
  *
  * Call hotplug for each new devices.
  */
-void pci_bus_add_devices(struct pci_bus *bus)
+void pci_bus_add_devices(const struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 	struct pci_bus *child;
@@ -184,8 +184,10 @@ void pci_enable_bridges(struct pci_bus *bus)
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->subordinate) {
-			retval = pci_enable_device(dev);
-			pci_set_master(dev);
+			if (atomic_read(&dev->enable_cnt) == 0) {
+				retval = pci_enable_device(dev);
+				pci_set_master(dev);
+			}
 			pci_enable_bridges(dev->subordinate);
 		}
 	}
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1c1141801060..fbc63d5e459f 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,9 +30,8 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
+#include <linux/acpi.h>
 #include <linux/pci-acpi.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
 
 #define MY_NAME	"acpi_pcihp"
 
@@ -333,19 +332,14 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 {
 	acpi_status status = AE_NOT_FOUND;
 	acpi_handle handle, phandle;
-	struct pci_bus *pbus = bus;
-	struct pci_dev *pdev;
-
-	do {
-		pdev = pbus->self;
-		if (!pdev) {
-			handle = acpi_get_pci_rootbridge_handle(
-				pci_domain_nr(pbus), pbus->number);
+	struct pci_bus *pbus;
+
+	handle = NULL;
+	for (pbus = bus; pbus; pbus = pbus->parent) {
+		handle = acpi_pci_get_bridge_handle(pbus);
+		if (handle)
 			break;
-		}
-		handle = DEVICE_ACPI_HANDLE(&(pdev->dev));
-		pbus = pbus->parent;
-	} while (!handle);
+	}
 
 	/*
 	 * _HPP settings apply to all child buses, until another _HPP is
@@ -378,12 +372,10 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
  *
  * Attempt to take hotplug control from firmware.
  */
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 {
 	acpi_status status;
 	acpi_handle chandle, handle;
-	struct pci_dev *pdev = dev;
-	struct pci_bus *parent;
 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
@@ -408,33 +400,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
 		acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
 		dbg("Trying to get hotplug control for %s\n",
 			(char *)string.pointer);
-		status = pci_osc_control_set(handle, flags);
+		status = acpi_pci_osc_control_set(handle, flags);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
 		kfree(string.pointer);
 		string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
 	}
 
-	pdev = dev;
-	handle = DEVICE_ACPI_HANDLE(&dev->dev);
-	while (!handle) {
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!handle) {
 		/*
 		 * This hotplug controller was not listed in the ACPI name
 		 * space at all. Try to get acpi handle of parent pci bus.
 		 */
-		if (!pdev || !pdev->bus->parent)
-			break;
-		parent = pdev->bus->parent;
-		dbg("Could not find %s in acpi namespace, trying parent\n",
-				pci_name(pdev));
-		if (!parent->self)
-			/* Parent must be a host bridge */
-			handle = acpi_get_pci_rootbridge_handle(
-					pci_domain_nr(parent),
-					parent->number);
-		else
-			handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
-		pdev = parent->self;
+		struct pci_bus *pbus;
+		for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
+			handle = acpi_pci_get_bridge_handle(pbus);
+			if (handle)
+				break;
+		}
 	}
 
 	while (handle) {
@@ -453,13 +437,13 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
 	}
 
 	dbg("Cannot get control of hotplug hardware for pci %s\n",
-	    pci_name(dev));
+	    pci_name(pdev));
 
 	kfree(string.pointer);
 	return -ENODEV;
 got_one:
-	dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev),
-		(char *)string.pointer);
+	dbg("Gained control for hotplug HW for pci %s (%s)\n",
+	    pci_name(pdev), (char *)string.pointer);
 	kfree(string.pointer);
 	return 0;
 }
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index d8649e127298..6151389fd903 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -1,395 +1,163 @@
1/* 1/* Works like the fakephp driver used to, except a little better.
2 * Fake PCI Hot Plug Controller Driver
3 * 2 *
4 * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> 3 * - It's possible to remove devices with subordinate busses.
5 * Copyright (C) 2003 IBM Corp. 4 * - New PCI devices that appear via any method, not just a fakephp triggered
6 * Copyright (C) 2003 Rolf Eike Beer <eike-kernel@sf-tec.de> 5 * rescan, will be noticed.
6 * - Devices that are removed via any method, not just a fakephp triggered
7 * removal, will also be noticed.
7 * 8 *
8 * Based on ideas and code from: 9 * Uses nothing from the pci-hotplug subsystem.
9 * Vladimir Kondratiev <vladimir.kondratiev@intel.com>
10 * Rolf Eike Beer <eike-kernel@sf-tec.de>
11 * 10 *
12 * All rights reserved.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation, version 2 of the License.
17 *
18 * Send feedback to <greg@kroah.com>
19 */ 11 */
20 12
21/*
22 *
23 * This driver will "emulate" removing PCI devices from the system. If
24 * the "power" file is written to with "0" then the specified PCI device
25 * will be completely removed from the kernel.
26 *
27 * WARNING, this does NOT turn off the power to the PCI device. This is
28 * a "logical" removal, not a physical or electrical removal.
29 *
30 * Use this module at your own risk, you have been warned!
31 *
32 * Enabling PCI devices is left as an exercise for the reader...
33 *
34 */
35#include <linux/kernel.h>
36#include <linux/module.h> 13#include <linux/module.h>
37#include <linux/pci.h> 14#include <linux/kernel.h>
38#include <linux/pci_hotplug.h> 15#include <linux/types.h>
16#include <linux/list.h>
17#include <linux/kobject.h>
18#include <linux/sysfs.h>
39#include <linux/init.h> 19#include <linux/init.h>
40#include <linux/string.h> 20#include <linux/pci.h>
41#include <linux/slab.h> 21#include <linux/device.h>
42#include <linux/workqueue.h>
43#include "../pci.h" 22#include "../pci.h"
44 23
45#if !defined(MODULE) 24struct legacy_slot {
46 #define MY_NAME "fakephp" 25 struct kobject kobj;
47#else 26 struct pci_dev *dev;
48 #define MY_NAME THIS_MODULE->name 27 struct list_head list;
49#endif
50
51#define dbg(format, arg...) \
52 do { \
53 if (debug) \
54 printk(KERN_DEBUG "%s: " format, \
55 MY_NAME , ## arg); \
56 } while (0)
57#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
58#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
59
60#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
61#define DRIVER_DESC "Fake PCI Hot Plug Controller Driver"
62
63struct dummy_slot {
64 struct list_head node;
65 struct hotplug_slot *slot;
66 struct pci_dev *dev;
67 struct work_struct remove_work;
68 unsigned long removed;
69}; 28};
70 29
71static int debug; 30static LIST_HEAD(legacy_list);
72static int dup_slots;
73static LIST_HEAD(slot_list);
74static struct workqueue_struct *dummyphp_wq;
75
76static void pci_rescan_worker(struct work_struct *work);
77static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
78
79static int enable_slot (struct hotplug_slot *slot);
80static int disable_slot (struct hotplug_slot *slot);
81 31
82static struct hotplug_slot_ops dummy_hotplug_slot_ops = { 32static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr,
83 .owner = THIS_MODULE, 33 char *buf)
84 .enable_slot = enable_slot,
85 .disable_slot = disable_slot,
86};
87
88static void dummy_release(struct hotplug_slot *slot)
89{ 34{
90 struct dummy_slot *dslot = slot->private; 35 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
91 36 strcpy(buf, "1\n");
92 list_del(&dslot->node); 37 return 2;
93 kfree(dslot->slot->info);
94 kfree(dslot->slot);
95 pci_dev_put(dslot->dev);
96 kfree(dslot);
97} 38}
98 39
99#define SLOT_NAME_SIZE 8 40static void remove_callback(void *data)
100
101static int add_slot(struct pci_dev *dev)
102{ 41{
103 struct dummy_slot *dslot; 42 pci_remove_bus_device((struct pci_dev *)data);
104 struct hotplug_slot *slot;
105 char name[SLOT_NAME_SIZE];
106 int retval = -ENOMEM;
107 static int count = 1;
108
109 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
110 if (!slot)
111 goto error;
112
113 slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
114 if (!slot->info)
115 goto error_slot;
116
117 slot->info->power_status = 1;
118 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
119 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
120
121 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
122 if (!dslot)
123 goto error_info;
124
125 if (dup_slots)
126 snprintf(name, SLOT_NAME_SIZE, "fake");
127 else
128 snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
129 dbg("slot->name = %s\n", name);
130 slot->ops = &dummy_hotplug_slot_ops;
131 slot->release = &dummy_release;
132 slot->private = dslot;
133
134 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
135 if (retval) {
136 err("pci_hp_register failed with error %d\n", retval);
137 goto error_dslot;
138 }
139
140 dbg("slot->name = %s\n", hotplug_slot_name(slot));
141 dslot->slot = slot;
142 dslot->dev = pci_dev_get(dev);
143 list_add (&dslot->node, &slot_list);
144 return retval;
145
146error_dslot:
147 kfree(dslot);
148error_info:
149 kfree(slot->info);
150error_slot:
151 kfree(slot);
152error:
153 return retval;
154} 43}
155 44
156static int __init pci_scan_buses(void) 45static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr,
46 const char *buf, size_t len)
157{ 47{
158 struct pci_dev *dev = NULL; 48 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
159 int lastslot = 0; 49 unsigned long val;
160 50
161 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 51 if (strict_strtoul(buf, 0, &val) < 0)
162 if (PCI_FUNC(dev->devfn) > 0 && 52 return -EINVAL;
163 lastslot == PCI_SLOT(dev->devfn))
164 continue;
165 lastslot = PCI_SLOT(dev->devfn);
166 add_slot(dev);
167 }
168 53
169 return 0; 54 if (val)
55 pci_rescan_bus(slot->dev->bus);
56 else
57 sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback,
58 slot->dev, THIS_MODULE);
59 return len;
170} 60}
171 61
172static void remove_slot(struct dummy_slot *dslot) 62static struct attribute *legacy_attrs[] = {
173{ 63 &(struct attribute){ .name = "power", .mode = 0644 },
174 int retval; 64 NULL,
175 65};
176 dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
177 retval = pci_hp_deregister(dslot->slot);
178 if (retval)
179 err("Problem unregistering a slot %s\n",
180 hotplug_slot_name(dslot->slot));
181}
182 66
183/* called from the single-threaded workqueue handler to remove a slot */ 67static void legacy_release(struct kobject *kobj)
184static void remove_slot_worker(struct work_struct *work)
185{ 68{
186 struct dummy_slot *dslot = 69 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
187 container_of(work, struct dummy_slot, remove_work);
188 remove_slot(dslot);
189}
190 70
191/** 71 pci_dev_put(slot->dev);
192 * pci_rescan_slot - Rescan slot 72 kfree(slot);
193 * @temp: Device template. Should be set: bus and devfn.
194 *
195 * Tries hard not to re-enable already existing devices;
196 * also handles scanning of subfunctions.
197 */
198static int pci_rescan_slot(struct pci_dev *temp)
199{
200 struct pci_bus *bus = temp->bus;
201 struct pci_dev *dev;
202 int func;
203 u8 hdr_type;
204 int count = 0;
205
206 if (!pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type)) {
207 temp->hdr_type = hdr_type & 0x7f;
208 if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
209 pci_dev_put(dev);
210 else {
211 dev = pci_scan_single_device(bus, temp->devfn);
212 if (dev) {
213 dbg("New device on %s function %x:%x\n",
214 bus->name, temp->devfn >> 3,
215 temp->devfn & 7);
216 count++;
217 }
218 }
219 /* multifunction device? */
220 if (!(hdr_type & 0x80))
221 return count;
222
223 /* continue scanning for other functions */
224 for (func = 1, temp->devfn++; func < 8; func++, temp->devfn++) {
225 if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
226 continue;
227 temp->hdr_type = hdr_type & 0x7f;
228
229 if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
230 pci_dev_put(dev);
231 else {
232 dev = pci_scan_single_device(bus, temp->devfn);
233 if (dev) {
234 dbg("New device on %s function %x:%x\n",
235 bus->name, temp->devfn >> 3,
236 temp->devfn & 7);
237 count++;
238 }
239 }
240 }
241 }
242
243 return count;
244} 73}
245 74
75static struct kobj_type legacy_ktype = {
76 .sysfs_ops = &(struct sysfs_ops){
77 .store = legacy_store, .show = legacy_show
78 },
79 .release = &legacy_release,
80 .default_attrs = legacy_attrs,
81};
246 82
247/** 83static int legacy_add_slot(struct pci_dev *pdev)
248 * pci_rescan_bus - Rescan PCI bus
249 * @bus: the PCI bus to rescan
250 *
251 * Call pci_rescan_slot for each possible function of the bus.
252 */
253static void pci_rescan_bus(const struct pci_bus *bus)
254{ 84{
255 unsigned int devfn; 85 struct legacy_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
256 struct pci_dev *dev;
257 int retval;
258 int found = 0;
259 dev = alloc_pci_dev();
260 if (!dev)
261 return;
262 86
263 dev->bus = (struct pci_bus*)bus; 87 if (!slot)
264 dev->sysdata = bus->sysdata; 88 return -ENOMEM;
265 for (devfn = 0; devfn < 0x100; devfn += 8) {
266 dev->devfn = devfn;
267 found += pci_rescan_slot(dev);
268 }
269
270 if (found) {
271 pci_bus_assign_resources(bus);
272 list_for_each_entry(dev, &bus->devices, bus_list) {
273 /* Skip already-added devices */
274 if (dev->is_added)
275 continue;
276 retval = pci_bus_add_device(dev);
277 if (retval)
278 dev_err(&dev->dev,
279 "Error adding device, continuing\n");
280 else
281 add_slot(dev);
282 }
283 pci_bus_add_devices(bus);
284 }
285 kfree(dev);
286}
287 89
288/* recursively scan all buses */ 90 if (kobject_init_and_add(&slot->kobj, &legacy_ktype,
289static void pci_rescan_buses(const struct list_head *list) 91 &pci_slots_kset->kobj, "%s",
290{ 92 dev_name(&pdev->dev))) {
291 const struct list_head *l; 93 dev_warn(&pdev->dev, "Failed to created legacy fake slot\n");
292 list_for_each(l,list) { 94 return -EINVAL;
293 const struct pci_bus *b = pci_bus_b(l);
294 pci_rescan_bus(b);
295 pci_rescan_buses(&b->children);
296 } 95 }
297} 96 slot->dev = pci_dev_get(pdev);
298 97
299/* initiate rescan of all pci buses */ 98 list_add(&slot->list, &legacy_list);
300static inline void pci_rescan(void) {
301 pci_rescan_buses(&pci_root_buses);
302}
303
304/* called from the single-threaded workqueue handler to rescan all pci buses */
305static void pci_rescan_worker(struct work_struct *work)
306{
307 pci_rescan();
308}
309 99
310static int enable_slot(struct hotplug_slot *hotplug_slot)
311{
312 /* mis-use enable_slot for rescanning of the pci bus */
313 cancel_work_sync(&pci_rescan_work);
314 queue_work(dummyphp_wq, &pci_rescan_work);
315 return 0; 100 return 0;
316} 101}
317 102
318static int disable_slot(struct hotplug_slot *slot) 103static int legacy_notify(struct notifier_block *nb,
104 unsigned long action, void *data)
319{ 105{
320 struct dummy_slot *dslot; 106 struct pci_dev *pdev = to_pci_dev(data);
321 struct pci_dev *dev;
322 int func;
323
324 if (!slot)
325 return -ENODEV;
326 dslot = slot->private;
327
328 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
329 107
330 for (func = 7; func >= 0; func--) { 108 if (action == BUS_NOTIFY_ADD_DEVICE) {
331 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); 109 legacy_add_slot(pdev);
332 if (!dev) 110 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
333 continue; 111 struct legacy_slot *slot;
334 112
335 if (test_and_set_bit(0, &dslot->removed)) { 113 list_for_each_entry(slot, &legacy_list, list)
336 dbg("Slot already scheduled for removal\n"); 114 if (slot->dev == pdev)
337 pci_dev_put(dev); 115 goto found;
338 return -ENODEV;
339 }
340 116
341 /* remove the device from the pci core */ 117 dev_warn(&pdev->dev, "Missing legacy fake slot?");
342 pci_remove_bus_device(dev); 118 return -ENODEV;
343 119found:
344 /* queue work item to blow away this sysfs entry and other 120 kobject_del(&slot->kobj);
345 * parts. 121 list_del(&slot->list);
346 */ 122 kobject_put(&slot->kobj);
347 INIT_WORK(&dslot->remove_work, remove_slot_worker);
348 queue_work(dummyphp_wq, &dslot->remove_work);
349
350 pci_dev_put(dev);
351 } 123 }
124
352 return 0; 125 return 0;
353} 126}
354 127
355static void cleanup_slots (void) 128static struct notifier_block legacy_notifier = {
356{ 129 .notifier_call = legacy_notify
357 struct list_head *tmp; 130};
358 struct list_head *next;
359 struct dummy_slot *dslot;
360
361 destroy_workqueue(dummyphp_wq);
362 list_for_each_safe (tmp, next, &slot_list) {
363 dslot = list_entry (tmp, struct dummy_slot, node);
364 remove_slot(dslot);
365 }
366
367}
368 131
369static int __init dummyphp_init(void) 132static int __init init_legacy(void)
370{ 133{
371 info(DRIVER_DESC "\n"); 134 struct pci_dev *pdev = NULL;
372 135
373 dummyphp_wq = create_singlethread_workqueue(MY_NAME); 136 /* Add existing devices */
374 if (!dummyphp_wq) 137 while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
375 return -ENOMEM; 138 legacy_add_slot(pdev);
376 139
377 return pci_scan_buses(); 140 /* Be alerted of any new ones */
141 bus_register_notifier(&pci_bus_type, &legacy_notifier);
142 return 0;
378} 143}
144module_init(init_legacy);
379 145
380 146static void __exit remove_legacy(void)
381static void __exit dummyphp_exit(void)
382{ 147{
383 cleanup_slots(); 148 struct legacy_slot *slot, *tmp;
149
150 bus_unregister_notifier(&pci_bus_type, &legacy_notifier);
151
152 list_for_each_entry_safe(slot, tmp, &legacy_list, list) {
153 list_del(&slot->list);
154 kobject_del(&slot->kobj);
155 kobject_put(&slot->kobj);
156 }
384} 157}
158module_exit(remove_legacy);
385 159
386module_init(dummyphp_init);
387module_exit(dummyphp_exit);
388 160
389MODULE_AUTHOR(DRIVER_AUTHOR); 161MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>");
390MODULE_DESCRIPTION(DRIVER_DESC); 162MODULE_DESCRIPTION("Legacy version of the fakephp interface");
391MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
392module_param(debug, bool, S_IRUGO | S_IWUSR);
393MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
394module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
395MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 39ae37589fda..0a368547e633 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,10 +46,10 @@ extern int pciehp_force;
 extern struct workqueue_struct *pciehp_wq;
 
 #define dbg(format, arg...) \
-	do { \
-		if (pciehp_debug) \
-			printk("%s: " format, MY_NAME , ## arg); \
-	} while (0)
+do { \
+	if (pciehp_debug) \
+		printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
 #define err(format, arg...) \
 	printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
 #define info(format, arg...) \
@@ -60,7 +60,7 @@ extern struct workqueue_struct *pciehp_wq;
 #define ctrl_dbg(ctrl, format, arg...) \
 	do { \
 		if (pciehp_debug) \
-			dev_printk(, &ctrl->pcie->device, \
+			dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
 				format, ## arg); \
 	} while (0)
 #define ctrl_err(ctrl, format, arg...) \
@@ -108,10 +108,11 @@ struct controller {
 	u32 slot_cap;
 	u8 cap_base;
 	struct timer_list poll_timer;
-	int cmd_busy;
+	unsigned int cmd_busy:1;
 	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
 	unsigned int notification_enabled:1;
+	unsigned int power_fault_detected;
 };
 
 #define INT_BUTTON_IGNORE	0
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 438d795f9fe3..96048010e7d9 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -67,37 +67,27 @@ static int __init parse_detect_mode(void)
 	return PCIEHP_DETECT_DEFAULT;
 }
 
-static struct pcie_port_service_id __initdata port_pci_ids[] = {
-	{
-		.vendor = PCI_ANY_ID,
-		.device = PCI_ANY_ID,
-		.port_type = PCIE_ANY_PORT,
-		.service_type = PCIE_PORT_SERVICE_HP,
-		.driver_data = 0,
-	}, { /* end: all zeroes */ }
-};
-
 static int __initdata dup_slot_id;
 static int __initdata acpi_slot_detected;
 static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
 
 /* Dummy driver for dumplicate name detection */
-static int __init dummy_probe(struct pcie_device *dev,
-			      const struct pcie_port_service_id *id)
+static int __init dummy_probe(struct pcie_device *dev)
 {
 	int pos;
 	u32 slot_cap;
 	struct slot *slot, *tmp;
 	struct pci_dev *pdev = dev->port;
 	struct pci_bus *pbus = pdev->subordinate;
-	if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
-		return -ENOMEM;
 	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
 	if (pciehp_get_hp_hw_control_from_firmware(pdev))
 		return -ENODEV;
 	if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
 		return -ENODEV;
 	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
+	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+	if (!slot)
+		return -ENOMEM;
 	slot->number = slot_cap >> 19;
 	list_for_each_entry(tmp, &dummy_slots, slot_list) {
 		if (tmp->number == slot->number)
@@ -111,7 +101,8 @@ static int __init dummy_probe(struct pcie_device *dev,
 
 static struct pcie_port_service_driver __initdata dummy_driver = {
 	.name		= "pciehp_dummy",
-	.id_table	= port_pci_ids,
+	.port_type	= PCIE_ANY_PORT,
+	.service	= PCIE_PORT_SERVICE_HP,
 	.probe		= dummy_probe,
 };
 
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 681e3912b821..fb254b2454de 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -401,7 +401,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
 	return 0;
 }
 
-static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_id *id)
+static int pciehp_probe(struct pcie_device *dev)
 {
 	int rc;
 	struct controller *ctrl;
@@ -475,7 +475,7 @@ static void pciehp_remove (struct pcie_device *dev)
 }
 
 #ifdef CONFIG_PM
-static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
+static int pciehp_suspend (struct pcie_device *dev)
 {
 	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	return 0;
@@ -503,20 +503,12 @@ static int pciehp_resume (struct pcie_device *dev)
 	}
 	return 0;
 }
-#endif
-
-static struct pcie_port_service_id port_pci_ids[] = { {
-	.vendor = PCI_ANY_ID,
-	.device = PCI_ANY_ID,
-	.port_type = PCIE_ANY_PORT,
-	.service_type = PCIE_PORT_SERVICE_HP,
-	.driver_data = 0,
-	}, { /* end: all zeroes */ }
-};
+#endif /* PM */
 
 static struct pcie_port_service_driver hpdriver_portdrv = {
 	.name		= PCIE_MODULE_NAME,
-	.id_table	= &port_pci_ids[0],
+	.port_type	= PCIE_ANY_PORT,
+	.service	= PCIE_PORT_SERVICE_HP,
 
 	.probe		= pciehp_probe,
 	.remove		= pciehp_remove,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7a16c6897bb9..07bd32151146 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -548,23 +548,21 @@ static int hpc_power_on_slot(struct slot * slot)
 
 	slot_cmd = POWER_ON;
 	cmd_mask = PCI_EXP_SLTCTL_PCC;
-	/* Enable detection that we turned off at slot power-off time */
 	if (!pciehp_poll_mode) {
-		slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			     PCI_EXP_SLTCTL_PDCE);
-		cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			     PCI_EXP_SLTCTL_PDCE);
+		/* Enable power fault detection turned off at power off time */
+		slot_cmd |= PCI_EXP_SLTCTL_PFDE;
+		cmd_mask |= PCI_EXP_SLTCTL_PFDE;
 	}
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-
 	if (retval) {
 		ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
-		return -1;
+		return retval;
 	}
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
 		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 
+	ctrl->power_fault_detected = 0;
 	return retval;
 }
 
@@ -621,18 +619,10 @@ static int hpc_power_off_slot(struct slot * slot)
 
 	slot_cmd = POWER_OFF;
 	cmd_mask = PCI_EXP_SLTCTL_PCC;
-	/*
-	 * If we get MRL or presence detect interrupts now, the isr
-	 * will notice the sticky power-fault bit too and issue power
-	 * indicator change commands. This will lead to an endless loop
-	 * of command completions, since the power-fault bit remains on
-	 * till the slot is powered on again.
-	 */
 	if (!pciehp_poll_mode) {
-		slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			      PCI_EXP_SLTCTL_PDCE);
-		cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			     PCI_EXP_SLTCTL_PDCE);
+		/* Disable power fault detection */
+		slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
+		cmd_mask |= PCI_EXP_SLTCTL_PFDE;
 	}
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -672,10 +662,11 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 	detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 		     PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
 		     PCI_EXP_SLTSTA_CC);
+	detected &= ~intr_loc;
 	intr_loc |= detected;
 	if (!intr_loc)
 		return IRQ_NONE;
-	if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) {
+	if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
 		ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
 			 __func__);
 		return IRQ_NONE;
@@ -709,9 +700,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 		pciehp_handle_presence_change(p_slot);
 
 	/* Check Power Fault Detected */
-	if (intr_loc & PCI_EXP_SLTSTA_PFD)
+	if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+		ctrl->power_fault_detected = 1;
 		pciehp_handle_power_fault(p_slot);
-
+	}
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 6aba0b6cf2e0..974e924ca96d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -48,10 +48,10 @@ extern int shpchp_debug;
 extern struct workqueue_struct *shpchp_wq;
 
 #define dbg(format, arg...) \
-	do { \
-		if (shpchp_debug) \
-			printk("%s: " format, MY_NAME , ## arg); \
-	} while (0)
+do { \
+	if (shpchp_debug) \
+		printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
 #define err(format, arg...) \
 	printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
 #define info(format, arg...) \
@@ -62,7 +62,7 @@ extern struct workqueue_struct *shpchp_wq;
 #define ctrl_dbg(ctrl, format, arg...) \
 	do { \
 		if (shpchp_debug) \
-			dev_printk(, &ctrl->pci_dev->dev, \
+			dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
 				format, ## arg); \
 	} while (0)
 #define ctrl_err(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 138f161becc0..aa315e52529b 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -137,7 +137,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
 						busnr))
 			break;
 	}
-	if (busnr >= end) {
+	if (busnr > end) {
 		ctrl_err(ctrl,
 			 "No free bus for hot-added bridge\n");
 		pci_dev_put(dev);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 49402c399232..9dbd5066acaf 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1782,7 +1782,7 @@ static inline void iommu_prepare_isa(void)
 	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
 
 	if (ret)
-		printk("IOMMU: Failed to create 0-64M identity map, "
+		printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
 			"floppy might not work\n");
 
 }
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
new file mode 100644
index 000000000000..7227efc760db
--- /dev/null
+++ b/drivers/pci/iov.c
@@ -0,0 +1,680 @@
1/*
2 * drivers/pci/iov.c
3 *
4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
5 *
6 * PCI Express I/O Virtualization (IOV) support.
7 * Single Root IOV 1.0
8 */
9
10#include <linux/pci.h>
11#include <linux/mutex.h>
12#include <linux/string.h>
13#include <linux/delay.h>
14#include "pci.h"
15
16#define VIRTFN_ID_LEN 16
17
18static inline u8 virtfn_bus(struct pci_dev *dev, int id)
19{
20 return dev->bus->number + ((dev->devfn + dev->sriov->offset +
21 dev->sriov->stride * id) >> 8);
22}
23
24static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
25{
26 return (dev->devfn + dev->sriov->offset +
27 dev->sriov->stride * id) & 0xff;
28}
29
30static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
31{
32 int rc;
33 struct pci_bus *child;
34
35 if (bus->number == busnr)
36 return bus;
37
38 child = pci_find_bus(pci_domain_nr(bus), busnr);
39 if (child)
40 return child;
41
42 child = pci_add_new_bus(bus, NULL, busnr);
43 if (!child)
44 return NULL;
45
46 child->subordinate = busnr;
47 child->dev.parent = bus->bridge;
48 rc = pci_bus_add_child(child);
49 if (rc) {
50 pci_remove_bus(child);
51 return NULL;
52 }
53
54 return child;
55}
56
57static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
58{
59 struct pci_bus *child;
60
61 if (bus->number == busnr)
62 return;
63
64 child = pci_find_bus(pci_domain_nr(bus), busnr);
65 BUG_ON(!child);
66
67 if (list_empty(&child->devices))
68 pci_remove_bus(child);
69}
70
71static int virtfn_add(struct pci_dev *dev, int id, int reset)
72{
73 int i;
74 int rc;
75 u64 size;
76 char buf[VIRTFN_ID_LEN];
77 struct pci_dev *virtfn;
78 struct resource *res;
79 struct pci_sriov *iov = dev->sriov;
80
81 virtfn = alloc_pci_dev();
82 if (!virtfn)
83 return -ENOMEM;
84
85 mutex_lock(&iov->dev->sriov->lock);
86 virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
87 if (!virtfn->bus) {
88 kfree(virtfn);
89 mutex_unlock(&iov->dev->sriov->lock);
90 return -ENOMEM;
91 }
92 virtfn->devfn = virtfn_devfn(dev, id);
93 virtfn->vendor = dev->vendor;
94 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
95 pci_setup_device(virtfn);
96 virtfn->dev.parent = dev->dev.parent;
97
98 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
99 res = dev->resource + PCI_IOV_RESOURCES + i;
100 if (!res->parent)
101 continue;
102 virtfn->resource[i].name = pci_name(virtfn);
103 virtfn->resource[i].flags = res->flags;
104 size = resource_size(res);
105 do_div(size, iov->total);
106 virtfn->resource[i].start = res->start + size * id;
107 virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
108 rc = request_resource(res, &virtfn->resource[i]);
109 BUG_ON(rc);
110 }
111
112 if (reset)
113 pci_execute_reset_function(virtfn);
114
115 pci_device_add(virtfn, virtfn->bus);
116 mutex_unlock(&iov->dev->sriov->lock);
117
118 virtfn->physfn = pci_dev_get(dev);
119 virtfn->is_virtfn = 1;
120
121 rc = pci_bus_add_device(virtfn);
122 if (rc)
123 goto failed1;
124 sprintf(buf, "virtfn%u", id);
125 rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
126 if (rc)
127 goto failed1;
128 rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
129 if (rc)
130 goto failed2;
131
132 kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
133
134 return 0;
135
136failed2:
137 sysfs_remove_link(&dev->dev.kobj, buf);
138failed1:
139 pci_dev_put(dev);
140 mutex_lock(&iov->dev->sriov->lock);
141 pci_remove_bus_device(virtfn);
142 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
143 mutex_unlock(&iov->dev->sriov->lock);
144
145 return rc;
146}
147
148static void virtfn_remove(struct pci_dev *dev, int id, int reset)
149{
150 char buf[VIRTFN_ID_LEN];
151 struct pci_bus *bus;
152 struct pci_dev *virtfn;
153 struct pci_sriov *iov = dev->sriov;
154
155 bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
156 if (!bus)
157 return;
158
159 virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
160 if (!virtfn)
161 return;
162
163 pci_dev_put(virtfn);
164
165 if (reset) {
166 device_release_driver(&virtfn->dev);
167 pci_execute_reset_function(virtfn);
168 }
169
170 sprintf(buf, "virtfn%u", id);
171 sysfs_remove_link(&dev->dev.kobj, buf);
172 sysfs_remove_link(&virtfn->dev.kobj, "physfn");
173
174 mutex_lock(&iov->dev->sriov->lock);
175 pci_remove_bus_device(virtfn);
176 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
177 mutex_unlock(&iov->dev->sriov->lock);
178
179 pci_dev_put(dev);
180}
181
182static int sriov_migration(struct pci_dev *dev)
183{
184 u16 status;
185 struct pci_sriov *iov = dev->sriov;
186
187 if (!iov->nr_virtfn)
188 return 0;
189
190 if (!(iov->cap & PCI_SRIOV_CAP_VFM))
191 return 0;
192
193 pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
194 if (!(status & PCI_SRIOV_STATUS_VFM))
195 return 0;
196
197 schedule_work(&iov->mtask);
198
199 return 1;
200}
201
202static void sriov_migration_task(struct work_struct *work)
203{
204 int i;
205 u8 state;
206 u16 status;
207 struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
208
209 for (i = iov->initial; i < iov->nr_virtfn; i++) {
210 state = readb(iov->mstate + i);
211 if (state == PCI_SRIOV_VFM_MI) {
212 writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
213 state = readb(iov->mstate + i);
214 if (state == PCI_SRIOV_VFM_AV)
215 virtfn_add(iov->self, i, 1);
216 } else if (state == PCI_SRIOV_VFM_MO) {
217 virtfn_remove(iov->self, i, 1);
218 writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
219 state = readb(iov->mstate + i);
220 if (state == PCI_SRIOV_VFM_AV)
221 virtfn_add(iov->self, i, 0);
222 }
223 }
224
225 pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
226 status &= ~PCI_SRIOV_STATUS_VFM;
227 pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
228}
229
230static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
231{
232 int bir;
233 u32 table;
234 resource_size_t pa;
235 struct pci_sriov *iov = dev->sriov;
236
237 if (nr_virtfn <= iov->initial)
238 return 0;
239
240 pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
241 bir = PCI_SRIOV_VFM_BIR(table);
242 if (bir > PCI_STD_RESOURCE_END)
243 return -EIO;
244
245 table = PCI_SRIOV_VFM_OFFSET(table);
246 if (table + nr_virtfn > pci_resource_len(dev, bir))
247 return -EIO;
248
249 pa = pci_resource_start(dev, bir) + table;
250 iov->mstate = ioremap(pa, nr_virtfn);
251 if (!iov->mstate)
252 return -ENOMEM;
253
254 INIT_WORK(&iov->mtask, sriov_migration_task);
255
256 iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
257 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
258
259 return 0;
260}
261
262static void sriov_disable_migration(struct pci_dev *dev)
263{
264 struct pci_sriov *iov = dev->sriov;
265
266 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
267 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
268
269 cancel_work_sync(&iov->mtask);
270 iounmap(iov->mstate);
271}
272
273static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
274{
275 int rc;
276 int i, j;
277 int nres;
278 u16 offset, stride, initial;
279 struct resource *res;
280 struct pci_dev *pdev;
281 struct pci_sriov *iov = dev->sriov;
282
283 if (!nr_virtfn)
284 return 0;
285
286 if (iov->nr_virtfn)
287 return -EINVAL;
288
289 pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
290 if (initial > iov->total ||
291 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
292 return -EIO;
293
294 if (nr_virtfn < 0 || nr_virtfn > iov->total ||
295 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
296 return -EINVAL;
297
298 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
299 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
300 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
301 if (!offset || (nr_virtfn > 1 && !stride))
302 return -EIO;
303
304 nres = 0;
305 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
306 res = dev->resource + PCI_IOV_RESOURCES + i;
307 if (res->parent)
308 nres++;
309 }
310 if (nres != iov->nres) {
311 dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
312 return -ENOMEM;
313 }
314
315 iov->offset = offset;
316 iov->stride = stride;
317
318 if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
319 dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
320 return -ENOMEM;
321 }
322
323 if (iov->link != dev->devfn) {
324 pdev = pci_get_slot(dev->bus, iov->link);
325 if (!pdev)
326 return -ENODEV;
327
328 pci_dev_put(pdev);
329
330 if (!pdev->is_physfn)
331 return -ENODEV;
332
333 rc = sysfs_create_link(&dev->dev.kobj,
334 &pdev->dev.kobj, "dep_link");
335 if (rc)
336 return rc;
337 }
338
339 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
340 pci_block_user_cfg_access(dev);
341 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
342 msleep(100);
343 pci_unblock_user_cfg_access(dev);
344
345 iov->initial = initial;
346 if (nr_virtfn < initial)
347 initial = nr_virtfn;
348
349 for (i = 0; i < initial; i++) {
350 rc = virtfn_add(dev, i, 0);
351 if (rc)
352 goto failed;
353 }
354
355 if (iov->cap & PCI_SRIOV_CAP_VFM) {
356 rc = sriov_enable_migration(dev, nr_virtfn);
357 if (rc)
358 goto failed;
359 }
360
361 kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
362 iov->nr_virtfn = nr_virtfn;
363
364 return 0;
365
366failed:
367 for (j = 0; j < i; j++)
368 virtfn_remove(dev, j, 0);
369
370 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
371 pci_block_user_cfg_access(dev);
372 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
373 ssleep(1);
374 pci_unblock_user_cfg_access(dev);
375
376 if (iov->link != dev->devfn)
377 sysfs_remove_link(&dev->dev.kobj, "dep_link");
378
379 return rc;
380}
381
382static void sriov_disable(struct pci_dev *dev)
383{
384 int i;
385 struct pci_sriov *iov = dev->sriov;
386
387 if (!iov->nr_virtfn)
388 return;
389
390 if (iov->cap & PCI_SRIOV_CAP_VFM)
391 sriov_disable_migration(dev);
392
393 for (i = 0; i < iov->nr_virtfn; i++)
394 virtfn_remove(dev, i, 0);
395
396 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
397 pci_block_user_cfg_access(dev);
398 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
399 ssleep(1);
400 pci_unblock_user_cfg_access(dev);
401
402 if (iov->link != dev->devfn)
403 sysfs_remove_link(&dev->dev.kobj, "dep_link");
404
405 iov->nr_virtfn = 0;
406}
407
408static int sriov_init(struct pci_dev *dev, int pos)
409{
410 int i;
411 int rc;
412 int nres;
413 u32 pgsz;
414 u16 ctrl, total, offset, stride;
415 struct pci_sriov *iov;
416 struct resource *res;
417 struct pci_dev *pdev;
418
419 if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
420 dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
421 return -ENODEV;
422
423 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
424 if (ctrl & PCI_SRIOV_CTRL_VFE) {
425 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
426 ssleep(1);
427 }
428
429 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
430 if (!total)
431 return 0;
432
433 ctrl = 0;
434 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
435 if (pdev->is_physfn)
436 goto found;
437
438 pdev = NULL;
439 if (pci_ari_enabled(dev->bus))
440 ctrl |= PCI_SRIOV_CTRL_ARI;
441
442found:
443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
444 pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
446 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
447 if (!offset || (total > 1 && !stride))
448 return -EIO;
449
450 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
451 i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
452 pgsz &= ~((1 << i) - 1);
453 if (!pgsz)
454 return -EIO;
455
456 pgsz &= ~(pgsz - 1);
457 pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
458
459 nres = 0;
460 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
461 res = dev->resource + PCI_IOV_RESOURCES + i;
462 i += __pci_read_base(dev, pci_bar_unknown, res,
463 pos + PCI_SRIOV_BAR + i * 4);
464 if (!res->flags)
465 continue;
466 if (resource_size(res) & (PAGE_SIZE - 1)) {
467 rc = -EIO;
468 goto failed;
469 }
470 res->end = res->start + resource_size(res) * total - 1;
471 nres++;
472 }
473
474 iov = kzalloc(sizeof(*iov), GFP_KERNEL);
475 if (!iov) {
476 rc = -ENOMEM;
477 goto failed;
478 }
479
480 iov->pos = pos;
481 iov->nres = nres;
482 iov->ctrl = ctrl;
483 iov->total = total;
484 iov->offset = offset;
485 iov->stride = stride;
486 iov->pgsz = pgsz;
487 iov->self = dev;
488 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
489 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
490
491 if (pdev)
492 iov->dev = pci_dev_get(pdev);
493 else {
494 iov->dev = dev;
495 mutex_init(&iov->lock);
496 }
497
498 dev->sriov = iov;
499 dev->is_physfn = 1;
500
501 return 0;
502
503failed:
504 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
505 res = dev->resource + PCI_IOV_RESOURCES + i;
506 res->flags = 0;
507 }
508
509 return rc;
510}
511
512static void sriov_release(struct pci_dev *dev)
513{
514 BUG_ON(dev->sriov->nr_virtfn);
515
516 if (dev == dev->sriov->dev)
517 mutex_destroy(&dev->sriov->lock);
518 else
519 pci_dev_put(dev->sriov->dev);
520
521 kfree(dev->sriov);
522 dev->sriov = NULL;
523}
524
525static void sriov_restore_state(struct pci_dev *dev)
526{
527 int i;
528 u16 ctrl;
529 struct pci_sriov *iov = dev->sriov;
530
531 pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
532 if (ctrl & PCI_SRIOV_CTRL_VFE)
533 return;
534
535 for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
536 pci_update_resource(dev, i);
537
538 pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
539 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
540 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
541 if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
542 msleep(100);
543}
544
545/**
546 * pci_iov_init - initialize the IOV capability
547 * @dev: the PCI device
548 *
549 * Returns 0 on success, or negative on failure.
550 */
551int pci_iov_init(struct pci_dev *dev)
552{
553 int pos;
554
555 if (!dev->is_pcie)
556 return -ENODEV;
557
558 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
559 if (pos)
560 return sriov_init(dev, pos);
561
562 return -ENODEV;
563}
564
565/**
566 * pci_iov_release - release resources used by the IOV capability
567 * @dev: the PCI device
568 */
569void pci_iov_release(struct pci_dev *dev)
570{
571 if (dev->is_physfn)
572 sriov_release(dev);
573}
574
575/**
576 * pci_iov_resource_bar - get position of the SR-IOV BAR
577 * @dev: the PCI device
578 * @resno: the resource number
579 * @type: the BAR type to be filled in
580 *
581 * Returns position of the BAR encapsulated in the SR-IOV capability.
582 */
583int pci_iov_resource_bar(struct pci_dev *dev, int resno,
584 enum pci_bar_type *type)
585{
586 if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
587 return 0;
588
589 BUG_ON(!dev->is_physfn);
590
591 *type = pci_bar_unknown;
592
593 return dev->sriov->pos + PCI_SRIOV_BAR +
594 4 * (resno - PCI_IOV_RESOURCES);
595}
596
597/**
598 * pci_restore_iov_state - restore the state of the IOV capability
599 * @dev: the PCI device
600 */
601void pci_restore_iov_state(struct pci_dev *dev)
602{
603 if (dev->is_physfn)
604 sriov_restore_state(dev);
605}
606
607/**
608 * pci_iov_bus_range - find bus range used by Virtual Function
609 * @bus: the PCI bus
610 *
611 * Returns max number of buses (exclude current one) used by Virtual
612 * Functions.
613 */
614int pci_iov_bus_range(struct pci_bus *bus)
615{
616 int max = 0;
617 u8 busnr;
618 struct pci_dev *dev;
619
620 list_for_each_entry(dev, &bus->devices, bus_list) {
621 if (!dev->is_physfn)
622 continue;
623 busnr = virtfn_bus(dev, dev->sriov->total - 1);
624 if (busnr > max)
625 max = busnr;
626 }
627
628 return max ? max - bus->number : 0;
629}
630
631/**
632 * pci_enable_sriov - enable the SR-IOV capability
633 * @dev: the PCI device
634 *
635 * Returns 0 on success, or negative on failure.
636 */
637int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
638{
639 might_sleep();
640
641 if (!dev->is_physfn)
642 return -ENODEV;
643
644 return sriov_enable(dev, nr_virtfn);
645}
646EXPORT_SYMBOL_GPL(pci_enable_sriov);
647
648/**
649 * pci_disable_sriov - disable the SR-IOV capability
650 * @dev: the PCI device
651 */
652void pci_disable_sriov(struct pci_dev *dev)
653{
654 might_sleep();
655
656 if (!dev->is_physfn)
657 return;
658
659 sriov_disable(dev);
660}
661EXPORT_SYMBOL_GPL(pci_disable_sriov);
662
663/**
664 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
665 * @dev: the PCI device
666 *
667 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
668 *
669 * Physical Function driver is responsible to register IRQ handler using
670 * VF Migration Interrupt Message Number, and call this function when the
671 * interrupt is generated by the hardware.
672 */
673irqreturn_t pci_sriov_migration(struct pci_dev *dev)
674{
675 if (!dev->is_physfn)
676 return IRQ_NONE;
677
678 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
679}
680EXPORT_SYMBOL_GPL(pci_sriov_migration);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index baba2eb5367d..6f2e6295e773 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -27,48 +27,53 @@ static int pci_msi_enable = 1;
27 27
28/* Arch hooks */ 28/* Arch hooks */
29 29
30int __attribute__ ((weak)) 30#ifndef arch_msi_check_device
31arch_msi_check_device(struct pci_dev *dev, int nvec, int type) 31int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
32{ 32{
33 return 0; 33 return 0;
34} 34}
35#endif
35 36
36int __attribute__ ((weak)) 37#ifndef arch_setup_msi_irqs
37arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) 38int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
38{
39 return 0;
40}
41
42int __attribute__ ((weak))
43arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
44{ 39{
45 struct msi_desc *entry; 40 struct msi_desc *entry;
46 int ret; 41 int ret;
47 42
43 /*
44 * If an architecture wants to support multiple MSI, it needs to
45 * override arch_setup_msi_irqs()
46 */
47 if (type == PCI_CAP_ID_MSI && nvec > 1)
48 return 1;
49
48 list_for_each_entry(entry, &dev->msi_list, list) { 50 list_for_each_entry(entry, &dev->msi_list, list) {
49 ret = arch_setup_msi_irq(dev, entry); 51 ret = arch_setup_msi_irq(dev, entry);
50 if (ret) 52 if (ret < 0)
51 return ret; 53 return ret;
54 if (ret > 0)
55 return -ENOSPC;
52 } 56 }
53 57
54 return 0; 58 return 0;
55} 59}
60#endif
56 61
57void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) 62#ifndef arch_teardown_msi_irqs
58{ 63void arch_teardown_msi_irqs(struct pci_dev *dev)
59 return;
60}
61
62void __attribute__ ((weak))
63arch_teardown_msi_irqs(struct pci_dev *dev)
64{ 64{
65 struct msi_desc *entry; 65 struct msi_desc *entry;
66 66
67 list_for_each_entry(entry, &dev->msi_list, list) { 67 list_for_each_entry(entry, &dev->msi_list, list) {
68 if (entry->irq != 0) 68 int i, nvec;
69 arch_teardown_msi_irq(entry->irq); 69 if (entry->irq == 0)
70 continue;
71 nvec = 1 << entry->msi_attrib.multiple;
72 for (i = 0; i < nvec; i++)
73 arch_teardown_msi_irq(entry->irq + i);
70 } 74 }
71} 75}
76#endif
72 77
73static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) 78static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
74{ 79{
@@ -111,27 +116,14 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
111 return (1 << (1 << x)) - 1; 116 return (1 << (1 << x)) - 1;
112} 117}
113 118
114static void msix_flush_writes(struct irq_desc *desc) 119static inline __attribute_const__ u32 msi_capable_mask(u16 control)
115{ 120{
116 struct msi_desc *entry; 121 return msi_mask((control >> 1) & 7);
122}
117 123
118 entry = get_irq_desc_msi(desc); 124static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
119 BUG_ON(!entry || !entry->dev); 125{
120 switch (entry->msi_attrib.type) { 126 return msi_mask((control >> 4) & 7);
121 case PCI_CAP_ID_MSI:
122 /* nothing to do */
123 break;
124 case PCI_CAP_ID_MSIX:
125 {
126 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
127 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
128 readl(entry->mask_base + offset);
129 break;
130 }
131 default:
132 BUG();
133 break;
134 }
135} 127}
136 128
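
A worked example may help fix the new helpers in mind (values chosen purely for illustration, not taken from the patch):

/*
 * msi_mask(x) expands to (1 << (1 << x)) - 1.  A function whose Message
 * Control register advertises Multiple Message Capable = 3 (bits 3:1)
 * supports 1 << 3 = 8 vectors, so
 *
 *      msi_capable_mask(control) == msi_mask(3) == (1 << 8) - 1 == 0xff
 *
 * i.e. one mask bit per possible vector.  msi_enabled_mask() applies the
 * same expansion to the Multiple Message Enable field in bits 6:4.
 */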
137/* 129/*
@@ -143,49 +135,71 @@ static void msix_flush_writes(struct irq_desc *desc)
143 * Returns 1 if it succeeded in masking the interrupt and 0 if the device 135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
144 * doesn't support MSI masking. 136 * doesn't support MSI masking.
145 */ 137 */
146static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) 138static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
147{ 139{
148 struct msi_desc *entry; 140 u32 mask_bits = desc->masked;
149 141
150 entry = get_irq_desc_msi(desc); 142 if (!desc->msi_attrib.maskbit)
151 BUG_ON(!entry || !entry->dev); 143 return;
152 switch (entry->msi_attrib.type) { 144
153 case PCI_CAP_ID_MSI: 145 mask_bits &= ~mask;
154 if (entry->msi_attrib.maskbit) { 146 mask_bits |= flag;
155 int pos; 147 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
156 u32 mask_bits; 148 desc->masked = mask_bits;
157 149}
158 pos = (long)entry->mask_base; 150
159 pci_read_config_dword(entry->dev, pos, &mask_bits); 151/*
160 mask_bits &= ~(mask); 152 * This internal function does not flush PCI writes to the device.
161 mask_bits |= flag & mask; 153 * All users must ensure that they read from the device before either
162 pci_write_config_dword(entry->dev, pos, mask_bits); 154 * assuming that the device state is up to date, or returning out of this
163 } else { 155 * file. This saves a few milliseconds when initialising devices with lots
164 return 0; 156 * of MSI-X interrupts.
165 } 157 */
166 break; 158static void msix_mask_irq(struct msi_desc *desc, u32 flag)
167 case PCI_CAP_ID_MSIX: 159{
168 { 160 u32 mask_bits = desc->masked;
169 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 161 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
170 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 162 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
171 writel(flag, entry->mask_base + offset); 163 mask_bits &= ~1;
172 readl(entry->mask_base + offset); 164 mask_bits |= flag;
173 break; 165 writel(mask_bits, desc->mask_base + offset);
174 } 166 desc->masked = mask_bits;
175 default: 167}
176 BUG(); 168
177 break; 169static void msi_set_mask_bit(unsigned irq, u32 flag)
170{
171 struct msi_desc *desc = get_irq_msi(irq);
172
173 if (desc->msi_attrib.is_msix) {
174 msix_mask_irq(desc, flag);
175 readl(desc->mask_base); /* Flush write to device */
176 } else {
177 unsigned offset = irq - desc->dev->irq;
178 msi_mask_irq(desc, 1 << offset, flag << offset);
178 } 179 }
179 entry->msi_attrib.masked = !!flag; 180}
180 return 1; 181
182void mask_msi_irq(unsigned int irq)
183{
184 msi_set_mask_bit(irq, 1);
185}
186
187void unmask_msi_irq(unsigned int irq)
188{
189 msi_set_mask_bit(irq, 0);
181} 190}
182 191
183void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 192void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
184{ 193{
185 struct msi_desc *entry = get_irq_desc_msi(desc); 194 struct msi_desc *entry = get_irq_desc_msi(desc);
186 switch(entry->msi_attrib.type) { 195 if (entry->msi_attrib.is_msix) {
187 case PCI_CAP_ID_MSI: 196 void __iomem *base = entry->mask_base +
188 { 197 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
198
199 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
200 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
201 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
202 } else {
189 struct pci_dev *dev = entry->dev; 203 struct pci_dev *dev = entry->dev;
190 int pos = entry->msi_attrib.pos; 204 int pos = entry->msi_attrib.pos;
191 u16 data; 205 u16 data;
@@ -201,21 +215,6 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
201 pci_read_config_word(dev, msi_data_reg(pos, 0), &data); 215 pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
202 } 216 }
203 msg->data = data; 217 msg->data = data;
204 break;
205 }
206 case PCI_CAP_ID_MSIX:
207 {
208 void __iomem *base;
209 base = entry->mask_base +
210 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
211
212 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
213 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
214 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
215 break;
216 }
217 default:
218 BUG();
219 } 218 }
220} 219}
221 220
@@ -229,11 +228,25 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
229void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 228void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
230{ 229{
231 struct msi_desc *entry = get_irq_desc_msi(desc); 230 struct msi_desc *entry = get_irq_desc_msi(desc);
232 switch (entry->msi_attrib.type) { 231 if (entry->msi_attrib.is_msix) {
233 case PCI_CAP_ID_MSI: 232 void __iomem *base;
234 { 233 base = entry->mask_base +
234 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
235
236 writel(msg->address_lo,
237 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
238 writel(msg->address_hi,
239 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
240 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
241 } else {
235 struct pci_dev *dev = entry->dev; 242 struct pci_dev *dev = entry->dev;
236 int pos = entry->msi_attrib.pos; 243 int pos = entry->msi_attrib.pos;
244 u16 msgctl;
245
246 pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
247 msgctl &= ~PCI_MSI_FLAGS_QSIZE;
248 msgctl |= entry->msi_attrib.multiple << 4;
249 pci_write_config_word(dev, msi_control_reg(pos), msgctl);
237 250
238 pci_write_config_dword(dev, msi_lower_address_reg(pos), 251 pci_write_config_dword(dev, msi_lower_address_reg(pos),
239 msg->address_lo); 252 msg->address_lo);
@@ -246,23 +259,6 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
246 pci_write_config_word(dev, msi_data_reg(pos, 0), 259 pci_write_config_word(dev, msi_data_reg(pos, 0),
247 msg->data); 260 msg->data);
248 } 261 }
249 break;
250 }
251 case PCI_CAP_ID_MSIX:
252 {
253 void __iomem *base;
254 base = entry->mask_base +
255 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
256
257 writel(msg->address_lo,
258 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
259 writel(msg->address_hi,
260 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
261 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
262 break;
263 }
264 default:
265 BUG();
266 } 262 }
267 entry->msg = *msg; 263 entry->msg = *msg;
268} 264}
@@ -274,37 +270,18 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
274 write_msi_msg_desc(desc, msg); 270 write_msi_msg_desc(desc, msg);
275} 271}
276 272
277void mask_msi_irq(unsigned int irq)
278{
279 struct irq_desc *desc = irq_to_desc(irq);
280
281 msi_set_mask_bits(desc, 1, 1);
282 msix_flush_writes(desc);
283}
284
285void unmask_msi_irq(unsigned int irq)
286{
287 struct irq_desc *desc = irq_to_desc(irq);
288
289 msi_set_mask_bits(desc, 1, 0);
290 msix_flush_writes(desc);
291}
292
293static int msi_free_irqs(struct pci_dev* dev); 273static int msi_free_irqs(struct pci_dev* dev);
294 274
295static struct msi_desc* alloc_msi_entry(void) 275static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
296{ 276{
297 struct msi_desc *entry; 277 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
298 278 if (!desc)
299 entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
300 if (!entry)
301 return NULL; 279 return NULL;
302 280
303 INIT_LIST_HEAD(&entry->list); 281 INIT_LIST_HEAD(&desc->list);
304 entry->irq = 0; 282 desc->dev = dev;
305 entry->dev = NULL;
306 283
307 return entry; 284 return desc;
308} 285}
309 286
310static void pci_intx_for_msi(struct pci_dev *dev, int enable) 287static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -328,15 +305,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
328 pci_intx_for_msi(dev, 0); 305 pci_intx_for_msi(dev, 0);
329 msi_set_enable(dev, 0); 306 msi_set_enable(dev, 0);
330 write_msi_msg(dev->irq, &entry->msg); 307 write_msi_msg(dev->irq, &entry->msg);
331 if (entry->msi_attrib.maskbit) {
332 struct irq_desc *desc = irq_to_desc(dev->irq);
333 msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
334 entry->msi_attrib.masked);
335 }
336 308
337 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 309 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
310 msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
338 control &= ~PCI_MSI_FLAGS_QSIZE; 311 control &= ~PCI_MSI_FLAGS_QSIZE;
339 control |= PCI_MSI_FLAGS_ENABLE; 312 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
340 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 313 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
341} 314}
342 315
@@ -354,9 +327,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
354 msix_set_enable(dev, 0); 327 msix_set_enable(dev, 0);
355 328
356 list_for_each_entry(entry, &dev->msi_list, list) { 329 list_for_each_entry(entry, &dev->msi_list, list) {
357 struct irq_desc *desc = irq_to_desc(entry->irq);
358 write_msi_msg(entry->irq, &entry->msg); 330 write_msi_msg(entry->irq, &entry->msg);
359 msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); 331 msix_mask_irq(entry, entry->masked);
360 } 332 }
361 333
362 BUG_ON(list_empty(&dev->msi_list)); 334 BUG_ON(list_empty(&dev->msi_list));
@@ -378,52 +350,48 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
378/** 350/**
379 * msi_capability_init - configure device's MSI capability structure 351 * msi_capability_init - configure device's MSI capability structure
380 * @dev: pointer to the pci_dev data structure of MSI device function 352 * @dev: pointer to the pci_dev data structure of MSI device function
353 * @nvec: number of interrupts to allocate
381 * 354 *
382 * Setup the MSI capability structure of device function with a single 355 * Setup the MSI capability structure of the device with the requested
383 * MSI irq, regardless of device function is capable of handling 356 * number of interrupts. A return value of zero indicates the successful
384 * multiple messages. A return of zero indicates the successful setup 357 * setup of an entry with the new MSI irq. A negative return value indicates
385 * of an entry zero with the new MSI irq or non-zero for otherwise. 358 * an error, and a positive return value indicates the number of interrupts
386 **/ 359 * which could have been allocated.
387static int msi_capability_init(struct pci_dev *dev) 360 */
361static int msi_capability_init(struct pci_dev *dev, int nvec)
388{ 362{
389 struct msi_desc *entry; 363 struct msi_desc *entry;
390 int pos, ret; 364 int pos, ret;
391 u16 control; 365 u16 control;
366 unsigned mask;
392 367
393 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ 368 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
394 369
395 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
396 pci_read_config_word(dev, msi_control_reg(pos), &control); 371 pci_read_config_word(dev, msi_control_reg(pos), &control);
397 /* MSI Entry Initialization */ 372 /* MSI Entry Initialization */
398 entry = alloc_msi_entry(); 373 entry = alloc_msi_entry(dev);
399 if (!entry) 374 if (!entry)
400 return -ENOMEM; 375 return -ENOMEM;
401 376
402 entry->msi_attrib.type = PCI_CAP_ID_MSI; 377 entry->msi_attrib.is_msix = 0;
403 entry->msi_attrib.is_64 = is_64bit_address(control); 378 entry->msi_attrib.is_64 = is_64bit_address(control);
404 entry->msi_attrib.entry_nr = 0; 379 entry->msi_attrib.entry_nr = 0;
405 entry->msi_attrib.maskbit = is_mask_bit_support(control); 380 entry->msi_attrib.maskbit = is_mask_bit_support(control);
406 entry->msi_attrib.masked = 1;
407 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 381 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
408 entry->msi_attrib.pos = pos; 382 entry->msi_attrib.pos = pos;
409 entry->dev = dev; 383
410 if (entry->msi_attrib.maskbit) { 384 entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
411 unsigned int base, maskbits, temp; 385 /* All MSIs are unmasked by default, Mask them all */
412 386 if (entry->msi_attrib.maskbit)
413 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 387 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
414 entry->mask_base = (void __iomem *)(long)base; 388 mask = msi_capable_mask(control);
415 389 msi_mask_irq(entry, mask, mask);
416 /* All MSIs are unmasked by default, Mask them all */ 390
417 pci_read_config_dword(dev, base, &maskbits);
418 temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
419 maskbits |= temp;
420 pci_write_config_dword(dev, base, maskbits);
421 entry->msi_attrib.maskbits_mask = temp;
422 }
423 list_add_tail(&entry->list, &dev->msi_list); 391 list_add_tail(&entry->list, &dev->msi_list);
424 392
425 /* Configure MSI capability structure */ 393 /* Configure MSI capability structure */
426 ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 394 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
427 if (ret) { 395 if (ret) {
428 msi_free_irqs(dev); 396 msi_free_irqs(dev);
429 return ret; 397 return ret;
@@ -476,26 +444,28 @@ static int msix_capability_init(struct pci_dev *dev,
476 444
477 /* MSI-X Table Initialization */ 445 /* MSI-X Table Initialization */
478 for (i = 0; i < nvec; i++) { 446 for (i = 0; i < nvec; i++) {
479 entry = alloc_msi_entry(); 447 entry = alloc_msi_entry(dev);
480 if (!entry) 448 if (!entry)
481 break; 449 break;
482 450
483 j = entries[i].entry; 451 j = entries[i].entry;
484 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 452 entry->msi_attrib.is_msix = 1;
485 entry->msi_attrib.is_64 = 1; 453 entry->msi_attrib.is_64 = 1;
486 entry->msi_attrib.entry_nr = j; 454 entry->msi_attrib.entry_nr = j;
487 entry->msi_attrib.maskbit = 1;
488 entry->msi_attrib.masked = 1;
489 entry->msi_attrib.default_irq = dev->irq; 455 entry->msi_attrib.default_irq = dev->irq;
490 entry->msi_attrib.pos = pos; 456 entry->msi_attrib.pos = pos;
491 entry->dev = dev;
492 entry->mask_base = base; 457 entry->mask_base = base;
458 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
459 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
460 msix_mask_irq(entry, 1);
493 461
494 list_add_tail(&entry->list, &dev->msi_list); 462 list_add_tail(&entry->list, &dev->msi_list);
495 } 463 }
496 464
497 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 465 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
498 if (ret) { 466 if (ret < 0) {
467 /* If we had some success, report the number of irqs
468 * we succeeded in setting up. */
499 int avail = 0; 469 int avail = 0;
500 list_for_each_entry(entry, &dev->msi_list, list) { 470 list_for_each_entry(entry, &dev->msi_list, list) {
501 if (entry->irq != 0) { 471 if (entry->irq != 0) {
@@ -503,14 +473,13 @@ static int msix_capability_init(struct pci_dev *dev,
503 } 473 }
504 } 474 }
505 475
506 msi_free_irqs(dev); 476 if (avail != 0)
477 ret = avail;
478 }
507 479
508 /* If we had some success report the number of irqs 480 if (ret) {
509 * we succeeded in setting up. 481 msi_free_irqs(dev);
510 */ 482 return ret;
511 if (avail == 0)
512 avail = ret;
513 return avail;
514 } 483 }
515 484
516 i = 0; 485 i = 0;
@@ -575,39 +544,54 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
575} 544}
576 545
577/** 546/**
578 * pci_enable_msi - configure device's MSI capability structure 547 * pci_enable_msi_block - configure device's MSI capability structure
579 * @dev: pointer to the pci_dev data structure of MSI device function 548 * @dev: device to configure
549 * @nvec: number of interrupts to configure
580 * 550 *
581 * Setup the MSI capability structure of device function with 551 * Allocate IRQs for a device with the MSI capability.
582 * a single MSI irq upon its software driver call to request for 552 * This function returns a negative errno if an error occurs. If it
583 * MSI mode enabled on its hardware device function. A return of zero 553 * is unable to allocate the number of interrupts requested, it returns
584 * indicates the successful setup of an entry zero with the new MSI 554 * the number of interrupts it might be able to allocate. If it successfully
585 * irq or non-zero for otherwise. 555 * allocates at least the number of interrupts requested, it returns 0 and
586 **/ 556 * updates the @dev's irq member to the lowest new interrupt number; the
587int pci_enable_msi(struct pci_dev* dev) 557 * other interrupt numbers allocated to this device are consecutive.
558 */
559int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
588{ 560{
589 int status; 561 int status, pos, maxvec;
562 u16 msgctl;
590 563
591 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 564 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
565 if (!pos)
566 return -EINVAL;
567 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
568 maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
569 if (nvec > maxvec)
570 return maxvec;
571
572 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
592 if (status) 573 if (status)
593 return status; 574 return status;
594 575
595 WARN_ON(!!dev->msi_enabled); 576 WARN_ON(!!dev->msi_enabled);
596 577
597 /* Check whether driver already requested for MSI-X irqs */ 578 /* Check whether driver already requested MSI-X irqs */
598 if (dev->msix_enabled) { 579 if (dev->msix_enabled) {
599 dev_info(&dev->dev, "can't enable MSI " 580 dev_info(&dev->dev, "can't enable MSI "
600 "(MSI-X already enabled)\n"); 581 "(MSI-X already enabled)\n");
601 return -EINVAL; 582 return -EINVAL;
602 } 583 }
603 status = msi_capability_init(dev); 584
585 status = msi_capability_init(dev, nvec);
604 return status; 586 return status;
605} 587}
606EXPORT_SYMBOL(pci_enable_msi); 588EXPORT_SYMBOL(pci_enable_msi_block);
607 589
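
A hedged sketch of the intended calling convention (hypothetical caller, not from this patch): because a positive return value reports how many vectors could be allocated instead of the requested count, a driver retries with that number until it gets 0 or a negative errno.

#include <linux/pci.h>

/* Illustrative only -- hypothetical helper in a driver. */
static int foo_enable_msi(struct pci_dev *pdev, unsigned int nvec)
{
        int rc;

        for (;;) {
                rc = pci_enable_msi_block(pdev, nvec);
                if (rc == 0)
                        return nvec;    /* irqs are pdev->irq .. pdev->irq + nvec - 1 */
                if (rc < 0)
                        return rc;      /* hard failure */
                if ((unsigned int)rc >= nvec)
                        return -EINVAL; /* defensive: avoid spinning forever */
                nvec = rc;              /* retry with what device and arch can offer */
        }
}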
608void pci_msi_shutdown(struct pci_dev* dev) 590void pci_msi_shutdown(struct pci_dev *dev)
609{ 591{
610 struct msi_desc *entry; 592 struct msi_desc *desc;
593 u32 mask;
594 u16 ctrl;
611 595
612 if (!pci_msi_enable || !dev || !dev->msi_enabled) 596 if (!pci_msi_enable || !dev || !dev->msi_enabled)
613 return; 597 return;
@@ -617,19 +601,15 @@ void pci_msi_shutdown(struct pci_dev* dev)
617 dev->msi_enabled = 0; 601 dev->msi_enabled = 0;
618 602
619 BUG_ON(list_empty(&dev->msi_list)); 603 BUG_ON(list_empty(&dev->msi_list));
620 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 604 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
621 /* Return the the pci reset with msi irqs unmasked */ 605 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
622 if (entry->msi_attrib.maskbit) { 606 mask = msi_capable_mask(ctrl);
623 u32 mask = entry->msi_attrib.maskbits_mask; 607 msi_mask_irq(desc, mask, ~mask);
624 struct irq_desc *desc = irq_to_desc(dev->irq);
625 msi_set_mask_bits(desc, mask, ~mask);
626 }
627 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
628 return;
629 608
630 /* Restore dev->irq to its default pin-assertion irq */ 609 /* Restore dev->irq to its default pin-assertion irq */
631 dev->irq = entry->msi_attrib.default_irq; 610 dev->irq = desc->msi_attrib.default_irq;
632} 611}
612
633void pci_disable_msi(struct pci_dev* dev) 613void pci_disable_msi(struct pci_dev* dev)
634{ 614{
635 struct msi_desc *entry; 615 struct msi_desc *entry;
@@ -640,7 +620,7 @@ void pci_disable_msi(struct pci_dev* dev)
640 pci_msi_shutdown(dev); 620 pci_msi_shutdown(dev);
641 621
642 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 622 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
643 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 623 if (entry->msi_attrib.is_msix)
644 return; 624 return;
645 625
646 msi_free_irqs(dev); 626 msi_free_irqs(dev);
@@ -652,14 +632,18 @@ static int msi_free_irqs(struct pci_dev* dev)
652 struct msi_desc *entry, *tmp; 632 struct msi_desc *entry, *tmp;
653 633
654 list_for_each_entry(entry, &dev->msi_list, list) { 634 list_for_each_entry(entry, &dev->msi_list, list) {
655 if (entry->irq) 635 int i, nvec;
656 BUG_ON(irq_has_action(entry->irq)); 636 if (!entry->irq)
637 continue;
638 nvec = 1 << entry->msi_attrib.multiple;
639 for (i = 0; i < nvec; i++)
640 BUG_ON(irq_has_action(entry->irq + i));
657 } 641 }
658 642
659 arch_teardown_msi_irqs(dev); 643 arch_teardown_msi_irqs(dev);
660 644
661 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 645 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
662 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 646 if (entry->msi_attrib.is_msix) {
663 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 647 writel(1, entry->mask_base + entry->msi_attrib.entry_nr
664 * PCI_MSIX_ENTRY_SIZE 648 * PCI_MSIX_ENTRY_SIZE
665 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 649 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
@@ -675,6 +659,23 @@ static int msi_free_irqs(struct pci_dev* dev)
675} 659}
676 660
677/** 661/**
662 * pci_msix_table_size - return the number of the device's MSI-X table entries
663 * @dev: pointer to the pci_dev data structure of MSI-X device function
664 */
665int pci_msix_table_size(struct pci_dev *dev)
666{
667 int pos;
668 u16 control;
669
670 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
671 if (!pos)
672 return 0;
673
674 pci_read_config_word(dev, msi_control_reg(pos), &control);
675 return multi_msix_capable(control);
676}
677
678/**
678 * pci_enable_msix - configure device's MSI-X capability structure 679 * pci_enable_msix - configure device's MSI-X capability structure
679 * @dev: pointer to the pci_dev data structure of MSI-X device function 680 * @dev: pointer to the pci_dev data structure of MSI-X device function
680 * @entries: pointer to an array of MSI-X entries 681 * @entries: pointer to an array of MSI-X entries
@@ -691,9 +692,8 @@ static int msi_free_irqs(struct pci_dev* dev)
691 **/ 692 **/
692int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 693int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
693{ 694{
694 int status, pos, nr_entries; 695 int status, nr_entries;
695 int i, j; 696 int i, j;
696 u16 control;
697 697
698 if (!entries) 698 if (!entries)
699 return -EINVAL; 699 return -EINVAL;
@@ -702,9 +702,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
702 if (status) 702 if (status)
703 return status; 703 return status;
704 704
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 705 nr_entries = pci_msix_table_size(dev);
706 pci_read_config_word(dev, msi_control_reg(pos), &control);
707 nr_entries = multi_msix_capable(control);
708 if (nvec > nr_entries) 706 if (nvec > nr_entries)
709 return -EINVAL; 707 return -EINVAL;
710 708
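
For MSI-X the shape is similar; a sketch of a caller that sizes its request with the new pci_msix_table_size() helper (hypothetical code, assuming the entries array passed in is large enough for the full table):

#include <linux/pci.h>

/* Illustrative only -- hypothetical helper in a driver. */
static int foo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
        int i, nvec, rc;

        nvec = pci_msix_table_size(pdev);
        if (!nvec)
                return -ENODEV;         /* no MSI-X capability */

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;   /* request table entries 0..nvec-1 */

        for (;;) {
                rc = pci_enable_msix(pdev, entries, nvec);
                if (rc == 0)
                        return nvec;    /* entries[i].vector now holds the irqs */
                if (rc < 0)
                        return rc;
                if (rc >= nvec)
                        return -EINVAL; /* defensive */
                nvec = rc;              /* positive: retry with what is available */
        }
}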
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 3898f5237144..71f4df2ef654 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -20,14 +20,8 @@
20#define msi_mask_bits_reg(base, is64bit) \ 20#define msi_mask_bits_reg(base, is64bit) \
21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) 21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE 22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
23#define multi_msi_capable(control) \
24 (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
25#define multi_msi_enable(control, num) \
26 control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
27#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 23#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
28#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 24#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
29#define msi_enable(control, num) multi_msi_enable(control, num); \
30 control |= PCI_MSI_FLAGS_ENABLE
31 25
32#define msix_table_offset_reg(base) (base + 0x04) 26#define msix_table_offset_reg(base) (base + 0x04)
33#define msix_pba_offset_reg(base) (base + 0x08) 27#define msix_pba_offset_reg(base) (base + 0x08)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index deea8a187eb8..fac5eddcefd2 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,221 +18,6 @@
18#include <linux/pci-acpi.h> 18#include <linux/pci-acpi.h>
19#include "pci.h" 19#include "pci.h"
20 20
21struct acpi_osc_data {
22 acpi_handle handle;
23 u32 support_set;
24 u32 control_set;
25 u32 control_query;
26 int is_queried;
27 struct list_head sibiling;
28};
29static LIST_HEAD(acpi_osc_data_list);
30
31struct acpi_osc_args {
32 u32 capbuf[3];
33};
34
35static DEFINE_MUTEX(pci_acpi_lock);
36
37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
38{
39 struct acpi_osc_data *data;
40
41 list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
42 if (data->handle == handle)
43 return data;
44 }
45 data = kzalloc(sizeof(*data), GFP_KERNEL);
46 if (!data)
47 return NULL;
48 INIT_LIST_HEAD(&data->sibiling);
49 data->handle = handle;
50 list_add_tail(&data->sibiling, &acpi_osc_data_list);
51 return data;
52}
53
54static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
55 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
56
57static acpi_status acpi_run_osc(acpi_handle handle,
58 struct acpi_osc_args *osc_args, u32 *retval)
59{
60 acpi_status status;
61 struct acpi_object_list input;
62 union acpi_object in_params[4];
63 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
64 union acpi_object *out_obj;
65 u32 errors, flags = osc_args->capbuf[OSC_QUERY_TYPE];
66
67 /* Setting up input parameters */
68 input.count = 4;
69 input.pointer = in_params;
70 in_params[0].type = ACPI_TYPE_BUFFER;
71 in_params[0].buffer.length = 16;
72 in_params[0].buffer.pointer = OSC_UUID;
73 in_params[1].type = ACPI_TYPE_INTEGER;
74 in_params[1].integer.value = 1;
75 in_params[2].type = ACPI_TYPE_INTEGER;
76 in_params[2].integer.value = 3;
77 in_params[3].type = ACPI_TYPE_BUFFER;
78 in_params[3].buffer.length = 12;
79 in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
80
81 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
82 if (ACPI_FAILURE(status))
83 return status;
84
85 if (!output.length)
86 return AE_NULL_OBJECT;
87
88 out_obj = output.pointer;
89 if (out_obj->type != ACPI_TYPE_BUFFER) {
90 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
91 status = AE_TYPE;
92 goto out_kfree;
93 }
94 /* Need to ignore the bit0 in result code */
95 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
96 if (errors) {
97 if (errors & OSC_REQUEST_ERROR)
98 printk(KERN_DEBUG "_OSC request fails\n");
99 if (errors & OSC_INVALID_UUID_ERROR)
100 printk(KERN_DEBUG "_OSC invalid UUID\n");
101 if (errors & OSC_INVALID_REVISION_ERROR)
102 printk(KERN_DEBUG "_OSC invalid revision\n");
103 if (errors & OSC_CAPABILITIES_MASK_ERROR) {
104 if (flags & OSC_QUERY_ENABLE)
105 goto out_success;
106 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
107 status = AE_SUPPORT;
108 goto out_kfree;
109 }
110 status = AE_ERROR;
111 goto out_kfree;
112 }
113out_success:
114 *retval = *((u32 *)(out_obj->buffer.pointer + 8));
115 status = AE_OK;
116
117out_kfree:
118 kfree(output.pointer);
119 return status;
120}
121
122static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
123{
124 acpi_status status;
125 u32 support_set, result;
126 struct acpi_osc_args osc_args;
127
128 /* do _OSC query for all possible controls */
129 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
130 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
131 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
132 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
133
134 status = acpi_run_osc(osc_data->handle, &osc_args, &result);
135 if (ACPI_SUCCESS(status)) {
136 osc_data->support_set = support_set;
137 osc_data->control_query = result;
138 osc_data->is_queried = 1;
139 }
140
141 return status;
142}
143
144/*
145 * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
146 * @flags: Bitmask of flags to support
147 *
148 * See the ACPI spec for the definition of the flags
149 */
150int pci_acpi_osc_support(acpi_handle handle, u32 flags)
151{
152 acpi_status status;
153 acpi_handle tmp;
154 struct acpi_osc_data *osc_data;
155 int rc = 0;
156
157 status = acpi_get_handle(handle, "_OSC", &tmp);
158 if (ACPI_FAILURE(status))
159 return -ENOTTY;
160
161 mutex_lock(&pci_acpi_lock);
162 osc_data = acpi_get_osc_data(handle);
163 if (!osc_data) {
164 printk(KERN_ERR "acpi osc data array is full\n");
165 rc = -ENOMEM;
166 goto out;
167 }
168
169 __acpi_query_osc(flags, osc_data);
170out:
171 mutex_unlock(&pci_acpi_lock);
172 return rc;
173}
174
175/**
176 * pci_osc_control_set - commit requested control to Firmware
177 * @handle: acpi_handle for the target ACPI object
178 * @flags: driver's requested control bits
179 *
180 * Attempt to take control from Firmware on requested control bits.
181 **/
182acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
183{
184 acpi_status status;
185 u32 control_req, control_set, result;
186 acpi_handle tmp;
187 struct acpi_osc_data *osc_data;
188 struct acpi_osc_args osc_args;
189
190 status = acpi_get_handle(handle, "_OSC", &tmp);
191 if (ACPI_FAILURE(status))
192 return status;
193
194 mutex_lock(&pci_acpi_lock);
195 osc_data = acpi_get_osc_data(handle);
196 if (!osc_data) {
197 printk(KERN_ERR "acpi osc data array is full\n");
198 status = AE_ERROR;
199 goto out;
200 }
201
202 control_req = (flags & OSC_CONTROL_MASKS);
203 if (!control_req) {
204 status = AE_TYPE;
205 goto out;
206 }
207
208 /* No need to evaluate _OSC if the control was already granted. */
209 if ((osc_data->control_set & control_req) == control_req)
210 goto out;
211
212 if (!osc_data->is_queried) {
213 status = __acpi_query_osc(osc_data->support_set, osc_data);
214 if (ACPI_FAILURE(status))
215 goto out;
216 }
217
218 if ((osc_data->control_query & control_req) != control_req) {
219 status = AE_SUPPORT;
220 goto out;
221 }
222
223 control_set = osc_data->control_set | control_req;
224 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
225 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
226 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
227 status = acpi_run_osc(handle, &osc_args, &result);
228 if (ACPI_SUCCESS(status))
229 osc_data->control_set = result;
230out:
231 mutex_unlock(&pci_acpi_lock);
232 return status;
233}
234EXPORT_SYMBOL(pci_osc_control_set);
235
236/* 21/*
237 * _SxD returns the D-state with the highest power 22 * _SxD returns the D-state with the highest power
238 * (lowest D-state number) supported in the S-state "x". 23 * (lowest D-state number) supported in the S-state "x".
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 267de88551c9..c0cbbb5a245e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -99,6 +99,52 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
99} 99}
100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
101 101
102/**
103 * store_remove_id - remove a PCI device ID from this driver
104 * @driver: target device driver
105 * @buf: buffer for scanning device ID data
106 * @count: input size
107 *
108 * Removes a dynamic pci device ID from this driver.
109 */
110static ssize_t
111store_remove_id(struct device_driver *driver, const char *buf, size_t count)
112{
113 struct pci_dynid *dynid, *n;
114 struct pci_driver *pdrv = to_pci_driver(driver);
115 __u32 vendor, device, subvendor = PCI_ANY_ID,
116 subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
117 int fields = 0;
118 int retval = -ENODEV;
119
120 fields = sscanf(buf, "%x %x %x %x %x %x",
121 &vendor, &device, &subvendor, &subdevice,
122 &class, &class_mask);
123 if (fields < 2)
124 return -EINVAL;
125
126 spin_lock(&pdrv->dynids.lock);
127 list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
128 struct pci_device_id *id = &dynid->id;
129 if ((id->vendor == vendor) &&
130 (id->device == device) &&
131 (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
132 (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
133 !((id->class ^ class) & class_mask)) {
134 list_del(&dynid->node);
135 kfree(dynid);
136 retval = 0;
137 break;
138 }
139 }
140 spin_unlock(&pdrv->dynids.lock);
141
142 if (retval)
143 return retval;
144 return count;
145}
146static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
147
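
The new file mirrors new_id: at least the vendor and device fields must be supplied, in hex, and the remaining subvendor/subdevice/class/class_mask fields default to wildcards. A minimal userspace sketch, with a made-up driver name and device ID, only to show the format:

#include <stdio.h>

/* Illustrative only: drop a dynamic ID that was previously added via new_id. */
int main(void)
{
        FILE *f = fopen("/sys/bus/pci/drivers/foo/remove_id", "w");

        if (!f)
                return 1;
        fprintf(f, "1234 5678\n");      /* "vendor device" in hex, as for new_id */
        return fclose(f) != 0;
}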
102static void 148static void
103pci_free_dynids(struct pci_driver *drv) 149pci_free_dynids(struct pci_driver *drv)
104{ 150{
@@ -125,6 +171,20 @@ static void pci_remove_newid_file(struct pci_driver *drv)
125{ 171{
126 driver_remove_file(&drv->driver, &driver_attr_new_id); 172 driver_remove_file(&drv->driver, &driver_attr_new_id);
127} 173}
174
175static int
176pci_create_removeid_file(struct pci_driver *drv)
177{
178 int error = 0;
179 if (drv->probe != NULL)
180 error = driver_create_file(&drv->driver,&driver_attr_remove_id);
181 return error;
182}
183
184static void pci_remove_removeid_file(struct pci_driver *drv)
185{
186 driver_remove_file(&drv->driver, &driver_attr_remove_id);
187}
128#else /* !CONFIG_HOTPLUG */ 188#else /* !CONFIG_HOTPLUG */
129static inline void pci_free_dynids(struct pci_driver *drv) {} 189static inline void pci_free_dynids(struct pci_driver *drv) {}
130static inline int pci_create_newid_file(struct pci_driver *drv) 190static inline int pci_create_newid_file(struct pci_driver *drv)
@@ -132,6 +192,11 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
132 return 0; 192 return 0;
133} 193}
134static inline void pci_remove_newid_file(struct pci_driver *drv) {} 194static inline void pci_remove_newid_file(struct pci_driver *drv) {}
195static inline int pci_create_removeid_file(struct pci_driver *drv)
196{
197 return 0;
198}
199static inline void pci_remove_removeid_file(struct pci_driver *drv) {}
135#endif 200#endif
136 201
137/** 202/**
@@ -899,13 +964,23 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
899 /* register with core */ 964 /* register with core */
900 error = driver_register(&drv->driver); 965 error = driver_register(&drv->driver);
901 if (error) 966 if (error)
902 return error; 967 goto out;
903 968
904 error = pci_create_newid_file(drv); 969 error = pci_create_newid_file(drv);
905 if (error) 970 if (error)
906 driver_unregister(&drv->driver); 971 goto out_newid;
907 972
973 error = pci_create_removeid_file(drv);
974 if (error)
975 goto out_removeid;
976out:
908 return error; 977 return error;
978
979out_removeid:
980 pci_remove_newid_file(drv);
981out_newid:
982 driver_unregister(&drv->driver);
983 goto out;
909} 984}
910 985
911/** 986/**
@@ -921,6 +996,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
921void 996void
922pci_unregister_driver(struct pci_driver *drv) 997pci_unregister_driver(struct pci_driver *drv)
923{ 998{
999 pci_remove_removeid_file(drv);
924 pci_remove_newid_file(drv); 1000 pci_remove_newid_file(drv);
925 driver_unregister(&drv->driver); 1001 driver_unregister(&drv->driver);
926 pci_free_dynids(drv); 1002 pci_free_dynids(drv);
@@ -1020,6 +1096,7 @@ struct bus_type pci_bus_type = {
1020 .remove = pci_device_remove, 1096 .remove = pci_device_remove,
1021 .shutdown = pci_device_shutdown, 1097 .shutdown = pci_device_shutdown,
1022 .dev_attrs = pci_dev_attrs, 1098 .dev_attrs = pci_dev_attrs,
1099 .bus_attrs = pci_bus_attrs,
1023 .pm = PCI_PM_OPS_PTR, 1100 .pm = PCI_PM_OPS_PTR,
1024}; 1101};
1025 1102
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index dfc4e0ddf241..e9a8706a6401 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -219,6 +219,83 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
219 return count; 219 return count;
220} 220}
221 221
222#ifdef CONFIG_HOTPLUG
223static DEFINE_MUTEX(pci_remove_rescan_mutex);
224static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
225 size_t count)
226{
227 unsigned long val;
228 struct pci_bus *b = NULL;
229
230 if (strict_strtoul(buf, 0, &val) < 0)
231 return -EINVAL;
232
233 if (val) {
234 mutex_lock(&pci_remove_rescan_mutex);
235 while ((b = pci_find_next_bus(b)) != NULL)
236 pci_rescan_bus(b);
237 mutex_unlock(&pci_remove_rescan_mutex);
238 }
239 return count;
240}
241
242struct bus_attribute pci_bus_attrs[] = {
243 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
244 __ATTR_NULL
245};
246
247static ssize_t
248dev_rescan_store(struct device *dev, struct device_attribute *attr,
249 const char *buf, size_t count)
250{
251 unsigned long val;
252 struct pci_dev *pdev = to_pci_dev(dev);
253
254 if (strict_strtoul(buf, 0, &val) < 0)
255 return -EINVAL;
256
257 if (val) {
258 mutex_lock(&pci_remove_rescan_mutex);
259 pci_rescan_bus(pdev->bus);
260 mutex_unlock(&pci_remove_rescan_mutex);
261 }
262 return count;
263}
264
265static void remove_callback(struct device *dev)
266{
267 struct pci_dev *pdev = to_pci_dev(dev);
268
269 mutex_lock(&pci_remove_rescan_mutex);
270 pci_remove_bus_device(pdev);
271 mutex_unlock(&pci_remove_rescan_mutex);
272}
273
274static ssize_t
275remove_store(struct device *dev, struct device_attribute *dummy,
276 const char *buf, size_t count)
277{
278 int ret = 0;
279 unsigned long val;
280 struct pci_dev *pdev = to_pci_dev(dev);
281
282 if (strict_strtoul(buf, 0, &val) < 0)
283 return -EINVAL;
284
285 if (pci_is_root_bus(pdev->bus))
286 return -EBUSY;
287
288 /* An attribute cannot be unregistered by one of its own methods,
289 * so we have to use this roundabout approach.
290 */
291 if (val)
292 ret = device_schedule_callback(dev, remove_callback);
293 if (ret)
294 count = ret;
295 return count;
296}
297#endif
298
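
Writing any non-zero value to these attributes triggers the action. A hedged userspace sketch (the device address is made up) that detaches one device and then asks the core to re-enumerate everything:

#include <stdio.h>

static int write_one(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* Remove the device and release its resources... */
        write_one("/sys/bus/pci/devices/0000:00:1c.0/remove", "1\n");
        /* ...then rescan every bus the core knows about. */
        return write_one("/sys/bus/pci/rescan", "1\n") != 0;
}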
222struct device_attribute pci_dev_attrs[] = { 299struct device_attribute pci_dev_attrs[] = {
223 __ATTR_RO(resource), 300 __ATTR_RO(resource),
224 __ATTR_RO(vendor), 301 __ATTR_RO(vendor),
@@ -237,10 +314,25 @@ struct device_attribute pci_dev_attrs[] = {
237 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), 314 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
238 broken_parity_status_show,broken_parity_status_store), 315 broken_parity_status_show,broken_parity_status_store),
239 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store), 316 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
317#ifdef CONFIG_HOTPLUG
318 __ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
319 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
320#endif
240 __ATTR_NULL, 321 __ATTR_NULL,
241}; 322};
242 323
243static ssize_t 324static ssize_t
325boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
326{
327 struct pci_dev *pdev = to_pci_dev(dev);
328
329 return sprintf(buf, "%u\n",
330 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
331 IORESOURCE_ROM_SHADOW));
332}
333struct device_attribute vga_attr = __ATTR_RO(boot_vga);
334
335static ssize_t
244pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, 336pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
245 char *buf, loff_t off, size_t count) 337 char *buf, loff_t off, size_t count)
246{ 338{
@@ -493,6 +585,19 @@ pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
493} 585}
494 586
495/** 587/**
588 * pci_adjust_legacy_attr - adjustment of legacy file attributes
589 * @b: bus to create files under
590 * @mmap_type: I/O port or memory
591 *
592 * Stub implementation. Can be overridden by arch if necessary.
593 */
594void __weak
595pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
596{
597 return;
598}
599
600/**
496 * pci_create_legacy_files - create legacy I/O port and memory files 601 * pci_create_legacy_files - create legacy I/O port and memory files
497 * @b: bus to create files under 602 * @b: bus to create files under
498 * 603 *
@@ -518,6 +623,7 @@ void pci_create_legacy_files(struct pci_bus *b)
518 b->legacy_io->read = pci_read_legacy_io; 623 b->legacy_io->read = pci_read_legacy_io;
519 b->legacy_io->write = pci_write_legacy_io; 624 b->legacy_io->write = pci_write_legacy_io;
520 b->legacy_io->mmap = pci_mmap_legacy_io; 625 b->legacy_io->mmap = pci_mmap_legacy_io;
626 pci_adjust_legacy_attr(b, pci_mmap_io);
521 error = device_create_bin_file(&b->dev, b->legacy_io); 627 error = device_create_bin_file(&b->dev, b->legacy_io);
522 if (error) 628 if (error)
523 goto legacy_io_err; 629 goto legacy_io_err;
@@ -528,6 +634,7 @@ void pci_create_legacy_files(struct pci_bus *b)
528 b->legacy_mem->size = 1024*1024; 634 b->legacy_mem->size = 1024*1024;
529 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 635 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
530 b->legacy_mem->mmap = pci_mmap_legacy_mem; 636 b->legacy_mem->mmap = pci_mmap_legacy_mem;
637 pci_adjust_legacy_attr(b, pci_mmap_mem);
531 error = device_create_bin_file(&b->dev, b->legacy_mem); 638 error = device_create_bin_file(&b->dev, b->legacy_mem);
532 if (error) 639 if (error)
533 goto legacy_mem_err; 640 goto legacy_mem_err;
@@ -719,8 +826,8 @@ static int pci_create_resource_files(struct pci_dev *pdev)
719 return 0; 826 return 0;
720} 827}
721#else /* !HAVE_PCI_MMAP */ 828#else /* !HAVE_PCI_MMAP */
722static inline int pci_create_resource_files(struct pci_dev *dev) { return 0; } 829int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
723static inline void pci_remove_resource_files(struct pci_dev *dev) { return; } 830void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
724#endif /* HAVE_PCI_MMAP */ 831#endif /* HAVE_PCI_MMAP */
725 832
726/** 833/**
@@ -884,18 +991,27 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
884 pdev->rom_attr = attr; 991 pdev->rom_attr = attr;
885 } 992 }
886 993
994 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
995 retval = device_create_file(&pdev->dev, &vga_attr);
996 if (retval)
997 goto err_rom_file;
998 }
999
887 /* add platform-specific attributes */ 1000 /* add platform-specific attributes */
888 retval = pcibios_add_platform_entries(pdev); 1001 retval = pcibios_add_platform_entries(pdev);
889 if (retval) 1002 if (retval)
890 goto err_rom_file; 1003 goto err_vga_file;
891 1004
892 /* add sysfs entries for various capabilities */ 1005 /* add sysfs entries for various capabilities */
893 retval = pci_create_capabilities_sysfs(pdev); 1006 retval = pci_create_capabilities_sysfs(pdev);
894 if (retval) 1007 if (retval)
895 goto err_rom_file; 1008 goto err_vga_file;
896 1009
897 return 0; 1010 return 0;
898 1011
1012err_vga_file:
1013 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
1014 device_remove_file(&pdev->dev, &vga_attr);
899err_rom_file: 1015err_rom_file:
900 if (rom_size) { 1016 if (rom_size) {
901 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 1017 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0195066251e5..fe7ac2cea7c9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -20,6 +20,8 @@
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include <linux/device.h>
24#include <asm/setup.h>
23#include "pci.h" 25#include "pci.h"
24 26
25unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 27unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
@@ -677,6 +679,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
677 679
678EXPORT_SYMBOL(pci_choose_state); 680EXPORT_SYMBOL(pci_choose_state);
679 681
682#define PCI_EXP_SAVE_REGS 7
683
680static int pci_save_pcie_state(struct pci_dev *dev) 684static int pci_save_pcie_state(struct pci_dev *dev)
681{ 685{
682 int pos, i = 0; 686 int pos, i = 0;
@@ -689,7 +693,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
689 693
690 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 694 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
691 if (!save_state) { 695 if (!save_state) {
692 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 696 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
693 return -ENOMEM; 697 return -ENOMEM;
694 } 698 }
695 cap = (u16 *)&save_state->data[0]; 699 cap = (u16 *)&save_state->data[0];
@@ -698,6 +702,9 @@ static int pci_save_pcie_state(struct pci_dev *dev)
698 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 702 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
699 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 703 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
700 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 704 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
705 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
706 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
707 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
701 708
702 return 0; 709 return 0;
703} 710}
@@ -718,6 +725,9 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
718 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); 725 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
719 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); 726 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
720 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); 727 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
728 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
729 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
730 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
721} 731}
722 732
723 733
@@ -732,7 +742,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
732 742
733 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 743 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
734 if (!save_state) { 744 if (!save_state) {
735 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 745 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
736 return -ENOMEM; 746 return -ENOMEM;
737 } 747 }
738 748
@@ -805,6 +815,7 @@ pci_restore_state(struct pci_dev *dev)
805 } 815 }
806 pci_restore_pcix_state(dev); 816 pci_restore_pcix_state(dev);
807 pci_restore_msi_state(dev); 817 pci_restore_msi_state(dev);
818 pci_restore_iov_state(dev);
808 819
809 return 0; 820 return 0;
810} 821}
@@ -1401,7 +1412,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1401{ 1412{
1402 int error; 1413 int error;
1403 1414
1404 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); 1415 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1416 PCI_EXP_SAVE_REGS * sizeof(u16));
1405 if (error) 1417 if (error)
1406 dev_err(&dev->dev, 1418 dev_err(&dev->dev,
1407 "unable to preallocate PCI Express save buffer\n"); 1419 "unable to preallocate PCI Express save buffer\n");
@@ -1472,7 +1484,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1472 if (!pin) 1484 if (!pin)
1473 return -1; 1485 return -1;
1474 1486
1475 while (dev->bus->self) { 1487 while (dev->bus->parent) {
1476 pin = pci_swizzle_interrupt_pin(dev, pin); 1488 pin = pci_swizzle_interrupt_pin(dev, pin);
1477 dev = dev->bus->self; 1489 dev = dev->bus->self;
1478 } 1490 }
@@ -1492,7 +1504,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1492{ 1504{
1493 u8 pin = *pinp; 1505 u8 pin = *pinp;
1494 1506
1495 while (dev->bus->self) { 1507 while (dev->bus->parent) {
1496 pin = pci_swizzle_interrupt_pin(dev, pin); 1508 pin = pci_swizzle_interrupt_pin(dev, pin);
1497 dev = dev->bus->self; 1509 dev = dev->bus->self;
1498 } 1510 }
@@ -2016,18 +2028,24 @@ static int __pcie_flr(struct pci_dev *dev, int probe)
2016 pci_block_user_cfg_access(dev); 2028 pci_block_user_cfg_access(dev);
2017 2029
2018 /* Wait for Transaction Pending bit clean */ 2030 /* Wait for Transaction Pending bit clean */
2031 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2032 if (!(status & PCI_EXP_DEVSTA_TRPND))
2033 goto transaction_done;
2034
2019 msleep(100); 2035 msleep(100);
2020 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2036 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2021 if (status & PCI_EXP_DEVSTA_TRPND) { 2037 if (!(status & PCI_EXP_DEVSTA_TRPND))
2022 dev_info(&dev->dev, "Busy after 100ms while trying to reset; " 2038 goto transaction_done;
2039
2040 dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
2023 "sleeping for 1 second\n"); 2041 "sleeping for 1 second\n");
2024 ssleep(1); 2042 ssleep(1);
2025 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2043 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2026 if (status & PCI_EXP_DEVSTA_TRPND) 2044 if (status & PCI_EXP_DEVSTA_TRPND)
2027 dev_info(&dev->dev, "Still busy after 1s; " 2045 dev_info(&dev->dev, "Still busy after 1s; "
2028 "proceeding with reset anyway\n"); 2046 "proceeding with reset anyway\n");
2029 }
2030 2047
2048transaction_done:
2031 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL, 2049 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
2032 PCI_EXP_DEVCTL_BCR_FLR); 2050 PCI_EXP_DEVCTL_BCR_FLR);
2033 mdelay(100); 2051 mdelay(100);
@@ -2054,18 +2072,24 @@ static int __pci_af_flr(struct pci_dev *dev, int probe)
2054 pci_block_user_cfg_access(dev); 2072 pci_block_user_cfg_access(dev);
2055 2073
2056 /* Wait for Transaction Pending bit clean */ 2074 /* Wait for Transaction Pending bit clean */
2075 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2076 if (!(status & PCI_AF_STATUS_TP))
2077 goto transaction_done;
2078
2057 msleep(100); 2079 msleep(100);
2058 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2080 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2059 if (status & PCI_AF_STATUS_TP) { 2081 if (!(status & PCI_AF_STATUS_TP))
2060 dev_info(&dev->dev, "Busy after 100ms while trying to" 2082 goto transaction_done;
2061 " reset; sleeping for 1 second\n"); 2083
2062 ssleep(1); 2084 dev_info(&dev->dev, "Busy after 100ms while trying to"
2063 pci_read_config_byte(dev, 2085 " reset; sleeping for 1 second\n");
2064 cappos + PCI_AF_STATUS, &status); 2086 ssleep(1);
2065 if (status & PCI_AF_STATUS_TP) 2087 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2066 dev_info(&dev->dev, "Still busy after 1s; " 2088 if (status & PCI_AF_STATUS_TP)
2067 "proceeding with reset anyway\n"); 2089 dev_info(&dev->dev, "Still busy after 1s; "
2068 } 2090 "proceeding with reset anyway\n");
2091
2092transaction_done:
2069 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 2093 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2070 mdelay(100); 2094 mdelay(100);
2071 2095
@@ -2334,18 +2358,140 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2334 */ 2358 */
2335int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 2359int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2336{ 2360{
2361 int reg;
2362
2337 if (resno < PCI_ROM_RESOURCE) { 2363 if (resno < PCI_ROM_RESOURCE) {
2338 *type = pci_bar_unknown; 2364 *type = pci_bar_unknown;
2339 return PCI_BASE_ADDRESS_0 + 4 * resno; 2365 return PCI_BASE_ADDRESS_0 + 4 * resno;
2340 } else if (resno == PCI_ROM_RESOURCE) { 2366 } else if (resno == PCI_ROM_RESOURCE) {
2341 *type = pci_bar_mem32; 2367 *type = pci_bar_mem32;
2342 return dev->rom_base_reg; 2368 return dev->rom_base_reg;
2369 } else if (resno < PCI_BRIDGE_RESOURCES) {
2370 /* device specific resource */
2371 reg = pci_iov_resource_bar(dev, resno, type);
2372 if (reg)
2373 return reg;
2343 } 2374 }
2344 2375
2345 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2376 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
2346 return 0; 2377 return 0;
2347} 2378}
2348 2379
2380#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2381static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2382spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
2383
2384/**
2385 * pci_specified_resource_alignment - get resource alignment specified by user.
2386 * @dev: the PCI device to query
2387 *
2388 * RETURNS: Resource alignment if it is specified.
2389 * Zero if it is not specified.
2390 */
2391resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2392{
2393 int seg, bus, slot, func, align_order, count;
2394 resource_size_t align = 0;
2395 char *p;
2396
2397 spin_lock(&resource_alignment_lock);
2398 p = resource_alignment_param;
2399 while (*p) {
2400 count = 0;
2401 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2402 p[count] == '@') {
2403 p += count + 1;
2404 } else {
2405 align_order = -1;
2406 }
2407 if (sscanf(p, "%x:%x:%x.%x%n",
2408 &seg, &bus, &slot, &func, &count) != 4) {
2409 seg = 0;
2410 if (sscanf(p, "%x:%x.%x%n",
2411 &bus, &slot, &func, &count) != 3) {
2412 /* Invalid format */
2413 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2414 p);
2415 break;
2416 }
2417 }
2418 p += count;
2419 if (seg == pci_domain_nr(dev->bus) &&
2420 bus == dev->bus->number &&
2421 slot == PCI_SLOT(dev->devfn) &&
2422 func == PCI_FUNC(dev->devfn)) {
2423 if (align_order == -1) {
2424 align = PAGE_SIZE;
2425 } else {
2426 align = 1 << align_order;
2427 }
2428 /* Found */
2429 break;
2430 }
2431 if (*p != ';' && *p != ',') {
2432 /* End of param or invalid format */
2433 break;
2434 }
2435 p++;
2436 }
2437 spin_unlock(&resource_alignment_lock);
2438 return align;
2439}
2440
2441/**
2442 * pci_is_reassigndev - check if the specified PCI device is a target device to reassign
2443 * @dev: the PCI device to check
2444 *
2445 * RETURNS: non-zero if the PCI device is a target device to reassign,
2446 * or zero if it is not.
2447 */
2448int pci_is_reassigndev(struct pci_dev *dev)
2449{
2450 return (pci_specified_resource_alignment(dev) != 0);
2451}
2452
2453ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2454{
2455 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
2456 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
2457 spin_lock(&resource_alignment_lock);
2458 strncpy(resource_alignment_param, buf, count);
2459 resource_alignment_param[count] = '\0';
2460 spin_unlock(&resource_alignment_lock);
2461 return count;
2462}
2463
2464ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
2465{
2466 size_t count;
2467 spin_lock(&resource_alignment_lock);
2468 count = snprintf(buf, size, "%s", resource_alignment_param);
2469 spin_unlock(&resource_alignment_lock);
2470 return count;
2471}
2472
2473static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
2474{
2475 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
2476}
2477
2478static ssize_t pci_resource_alignment_store(struct bus_type *bus,
2479 const char *buf, size_t count)
2480{
2481 return pci_set_resource_alignment_param(buf, count);
2482}
2483
2484BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
2485 pci_resource_alignment_store);
2486
2487static int __init pci_resource_alignment_sysfs_init(void)
2488{
2489 return bus_create_file(&pci_bus_type,
2490 &bus_attr_resource_alignment);
2491}
2492
2493late_initcall(pci_resource_alignment_sysfs_init);
2494
2349static void __devinit pci_no_domains(void) 2495static void __devinit pci_no_domains(void)
2350{ 2496{
2351#ifdef CONFIG_PCI_DOMAINS 2497#ifdef CONFIG_PCI_DOMAINS
@@ -2394,6 +2540,9 @@ static int __init pci_setup(char *str)
2394 pci_cardbus_io_size = memparse(str + 9, &str); 2540 pci_cardbus_io_size = memparse(str + 9, &str);
2395 } else if (!strncmp(str, "cbmemsize=", 10)) { 2541 } else if (!strncmp(str, "cbmemsize=", 10)) {
2396 pci_cardbus_mem_size = memparse(str + 10, &str); 2542 pci_cardbus_mem_size = memparse(str + 10, &str);
2543 } else if (!strncmp(str, "resource_alignment=", 19)) {
2544 pci_set_resource_alignment_param(str + 19,
2545 strlen(str + 19));
2397 } else { 2546 } else {
2398 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2547 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2399 str); 2548 str);
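
Tying the pieces above together: the new parameter takes one or more devices separated by ';' or ',', each optionally prefixed with an alignment order, so a command line such as (device addresses invented for the example)

        pci=resource_alignment=20@0000:00:1f.0;00:02.0

asks for 2^20-byte (1 MiB) alignment on 0000:00:1f.0 and the default PAGE_SIZE alignment on 00:02.0. The same string can be inspected or changed at runtime through the new /sys/bus/pci/resource_alignment bus attribute.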
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 149fff65891f..d03f6b99f292 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,8 @@
1#ifndef DRIVERS_PCI_H 1#ifndef DRIVERS_PCI_H
2#define DRIVERS_PCI_H 2#define DRIVERS_PCI_H
3 3
4#include <linux/workqueue.h>
5
4#define PCI_CFG_SPACE_SIZE 256 6#define PCI_CFG_SPACE_SIZE 256
5#define PCI_CFG_SPACE_EXP_SIZE 4096 7#define PCI_CFG_SPACE_EXP_SIZE 4096
6 8
@@ -135,6 +137,12 @@ extern int pcie_mch_quirk;
135extern struct device_attribute pci_dev_attrs[]; 137extern struct device_attribute pci_dev_attrs[];
136extern struct device_attribute dev_attr_cpuaffinity; 138extern struct device_attribute dev_attr_cpuaffinity;
137extern struct device_attribute dev_attr_cpulistaffinity; 139extern struct device_attribute dev_attr_cpulistaffinity;
140#ifdef CONFIG_HOTPLUG
141extern struct bus_attribute pci_bus_attrs[];
142#else
143#define pci_bus_attrs NULL
144#endif
145
138 146
139/** 147/**
140 * pci_match_one_device - Tell if a PCI device structure has a matching 148 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -177,6 +185,7 @@ enum pci_bar_type {
177 pci_bar_mem64, /* A 64-bit memory BAR */ 185 pci_bar_mem64, /* A 64-bit memory BAR */
178}; 186};
179 187
188extern int pci_setup_device(struct pci_dev *dev);
180extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 189extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
181 struct resource *res, unsigned int reg); 190 struct resource *res, unsigned int reg);
182extern int pci_resource_bar(struct pci_dev *dev, int resno, 191extern int pci_resource_bar(struct pci_dev *dev, int resno,
@@ -194,4 +203,60 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
194 return bus->self && bus->self->ari_enabled; 203 return bus->self && bus->self->ari_enabled;
195} 204}
196 205
206#ifdef CONFIG_PCI_QUIRKS
207extern int pci_is_reassigndev(struct pci_dev *dev);
208resource_size_t pci_specified_resource_alignment(struct pci_dev *dev);
209extern void pci_disable_bridge_window(struct pci_dev *dev);
210#endif
211
212/* Single Root I/O Virtualization */
213struct pci_sriov {
214 int pos; /* capability position */
215 int nres; /* number of resources */
216 u32 cap; /* SR-IOV Capabilities */
217 u16 ctrl; /* SR-IOV Control */
218 u16 total; /* total VFs associated with the PF */
219 u16 initial; /* initial VFs associated with the PF */
220 u16 nr_virtfn; /* number of VFs available */
221 u16 offset; /* first VF Routing ID offset */
222 u16 stride; /* following VF stride */
223 u32 pgsz; /* page size for BAR alignment */
224 u8 link; /* Function Dependency Link */
225 struct pci_dev *dev; /* lowest numbered PF */
226 struct pci_dev *self; /* this PF */
227 struct mutex lock; /* lock for VF bus */
228 struct work_struct mtask; /* VF Migration task */
229 u8 __iomem *mstate; /* VF Migration State Array */
230};
231
232#ifdef CONFIG_PCI_IOV
233extern int pci_iov_init(struct pci_dev *dev);
234extern void pci_iov_release(struct pci_dev *dev);
235extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
236 enum pci_bar_type *type);
237extern void pci_restore_iov_state(struct pci_dev *dev);
238extern int pci_iov_bus_range(struct pci_bus *bus);
239#else
240static inline int pci_iov_init(struct pci_dev *dev)
241{
242 return -ENODEV;
243}
244static inline void pci_iov_release(struct pci_dev *dev)
245
246{
247}
248static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno,
249 enum pci_bar_type *type)
250{
251 return 0;
252}
253static inline void pci_restore_iov_state(struct pci_dev *dev)
254{
255}
256static inline int pci_iov_bus_range(struct pci_bus *bus)
257{
258 return 0;
259}
260#endif /* CONFIG_PCI_IOV */
261
197#endif /* DRIVERS_PCI_H */ 262#endif /* DRIVERS_PCI_H */
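The struct pci_sriov fields above carry the SR-IOV "First VF Offset" and "VF Stride" values, which determine each virtual function's routing ID relative to the physical function. Below is a small, hypothetical sketch of that arithmetic; the helper name and example values are not from this patch set, only the spec-defined formula is assumed.

/*
 * Per the SR-IOV spec, routing ID of VF n (0-based here) =
 * PF routing ID + First VF Offset + n * VF Stride.
 */
#include <stdio.h>

static unsigned int vf_rid(unsigned int pf_rid, unsigned int offset,
			   unsigned int stride, unsigned int vf_index)
{
	return pf_rid + offset + stride * vf_index;
}

int main(void)
{
	/* hypothetical PF at 0000:00:10.0 (RID 0x80), offset 0x80, stride 2 */
	unsigned int rid = vf_rid(0x80, 0x80, 2, 3);

	printf("VF3 -> %02x:%02x.%x\n", rid >> 8, (rid >> 3) & 0x1f, rid & 7);
	return 0;
}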
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index e390707661dd..32ade5af927e 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,30 +38,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
38MODULE_DESCRIPTION(DRIVER_DESC); 38MODULE_DESCRIPTION(DRIVER_DESC);
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static int __devinit aer_probe (struct pcie_device *dev, 41static int __devinit aer_probe (struct pcie_device *dev);
42 const struct pcie_port_service_id *id );
43static void aer_remove(struct pcie_device *dev); 42static void aer_remove(struct pcie_device *dev);
44static int aer_suspend(struct pcie_device *dev, pm_message_t state)
45{return 0;}
46static int aer_resume(struct pcie_device *dev) {return 0;}
47static pci_ers_result_t aer_error_detected(struct pci_dev *dev, 43static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
48 enum pci_channel_state error); 44 enum pci_channel_state error);
49static void aer_error_resume(struct pci_dev *dev); 45static void aer_error_resume(struct pci_dev *dev);
50static pci_ers_result_t aer_root_reset(struct pci_dev *dev); 46static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
51 47
52/*
53 * PCI Express bus's AER Root service driver data structure
54 */
55static struct pcie_port_service_id aer_id[] = {
56 {
57 .vendor = PCI_ANY_ID,
58 .device = PCI_ANY_ID,
59 .port_type = PCIE_RC_PORT,
60 .service_type = PCIE_PORT_SERVICE_AER,
61 },
62 { /* end: all zeroes */ }
63};
64
65static struct pci_error_handlers aer_error_handlers = { 48static struct pci_error_handlers aer_error_handlers = {
66 .error_detected = aer_error_detected, 49 .error_detected = aer_error_detected,
67 .resume = aer_error_resume, 50 .resume = aer_error_resume,
@@ -69,14 +52,12 @@ static struct pci_error_handlers aer_error_handlers = {
69 52
70static struct pcie_port_service_driver aerdriver = { 53static struct pcie_port_service_driver aerdriver = {
71 .name = "aer", 54 .name = "aer",
72 .id_table = &aer_id[0], 55 .port_type = PCIE_ANY_PORT,
56 .service = PCIE_PORT_SERVICE_AER,
73 57
74 .probe = aer_probe, 58 .probe = aer_probe,
75 .remove = aer_remove, 59 .remove = aer_remove,
76 60
77 .suspend = aer_suspend,
78 .resume = aer_resume,
79
80 .err_handler = &aer_error_handlers, 61 .err_handler = &aer_error_handlers,
81 62
82 .reset_link = aer_root_reset, 63 .reset_link = aer_root_reset,
@@ -207,8 +188,7 @@ static void aer_remove(struct pcie_device *dev)
207 * 188 *
208 * Invoked when PCI Express bus loads AER service driver. 189 * Invoked when PCI Express bus loads AER service driver.
209 **/ 190 **/
210static int __devinit aer_probe (struct pcie_device *dev, 191static int __devinit aer_probe (struct pcie_device *dev)
211 const struct pcie_port_service_id *id )
212{ 192{
213 int status; 193 int status;
214 struct aer_rpc *rpc; 194 struct aer_rpc *rpc;
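With the pcie_port_service_id table gone, a port service driver now declares the port type and service it handles directly in struct pcie_port_service_driver, as the aerdriver hunk above shows. A minimal sketch of a registration following the new scheme; all names here are illustrative, not from this series.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pcieport_if.h>

static int example_probe(struct pcie_device *dev)
{
	dev_info(&dev->device, "bound to port %s\n", pci_name(dev->port));
	return 0;
}

static void example_remove(struct pcie_device *dev)
{
}

static struct pcie_port_service_driver example_service = {
	.name		= "example",
	.port_type	= PCIE_ANY_PORT,	/* matched against the port's pcie_port_data */
	.service	= PCIE_PORT_SERVICE_VC,	/* must equal pciedev->service */
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pcie_port_service_register(&example_service);
}
module_init(example_init);

The bus match code in portdrv_bus.c below now compares only these two fields, so a service driver no longer lists vendor/device IDs at all.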
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index ebce26c37049..8edb2f300e8f 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -38,7 +38,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
38 38
39 handle = acpi_find_root_bridge_handle(pdev); 39 handle = acpi_find_root_bridge_handle(pdev);
40 if (handle) { 40 if (handle) {
41 status = pci_osc_control_set(handle, 41 status = acpi_pci_osc_control_set(handle,
42 OSC_PCI_EXPRESS_AER_CONTROL | 42 OSC_PCI_EXPRESS_AER_CONTROL |
43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
44 } 44 }
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 382575007382..307452f30035 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -351,21 +351,21 @@ static int find_aer_service_iter(struct device *device, void *data)
351{ 351{
352 struct device_driver *driver; 352 struct device_driver *driver;
353 struct pcie_port_service_driver *service_driver; 353 struct pcie_port_service_driver *service_driver;
354 struct pcie_device *pcie_dev;
355 struct find_aer_service_data *result; 354 struct find_aer_service_data *result;
356 355
357 result = (struct find_aer_service_data *) data; 356 result = (struct find_aer_service_data *) data;
358 357
359 if (device->bus == &pcie_port_bus_type) { 358 if (device->bus == &pcie_port_bus_type) {
360 pcie_dev = to_pcie_device(device); 359 struct pcie_port_data *port_data;
361 if (pcie_dev->id.port_type == PCIE_SW_DOWNSTREAM_PORT) 360
361 port_data = pci_get_drvdata(to_pcie_device(device)->port);
362 if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
362 result->is_downstream = 1; 363 result->is_downstream = 1;
363 364
364 driver = device->driver; 365 driver = device->driver;
365 if (driver) { 366 if (driver) {
366 service_driver = to_service_driver(driver); 367 service_driver = to_service_driver(driver);
367 if (service_driver->id_table->service_type == 368 if (service_driver->service == PCIE_PORT_SERVICE_AER) {
368 PCIE_PORT_SERVICE_AER) {
369 result->aer_driver = service_driver; 369 result->aer_driver = service_driver;
370 return 1; 370 return 1;
371 } 371 }
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2529f3f2ea5a..17ad53868f9f 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,19 +25,21 @@
25#define PCIE_CAPABILITIES_REG 0x2 25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14 26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4 27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
29/*
30 * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
31 * table entries used by port services must not exceed 31
32 */
33#define PCIE_PORT_MAX_MSIX_ENTRIES 32
28 34
29#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 35#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
30 36
31struct pcie_port_device_ext {
32 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
33};
34
35extern struct bus_type pcie_port_bus_type; 37extern struct bus_type pcie_port_bus_type;
36extern int pcie_port_device_probe(struct pci_dev *dev); 38extern int pcie_port_device_probe(struct pci_dev *dev);
37extern int pcie_port_device_register(struct pci_dev *dev); 39extern int pcie_port_device_register(struct pci_dev *dev);
38#ifdef CONFIG_PM 40#ifdef CONFIG_PM
39extern int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state); 41extern int pcie_port_device_suspend(struct device *dev);
40extern int pcie_port_device_resume(struct pci_dev *dev); 42extern int pcie_port_device_resume(struct device *dev);
41#endif 43#endif
42extern void pcie_port_device_remove(struct pci_dev *dev); 44extern void pcie_port_device_remove(struct pci_dev *dev);
43extern int __must_check pcie_port_bus_register(void); 45extern int __must_check pcie_port_bus_register(void);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index eec89b767f9f..ef3a4eeaebb4 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,20 +26,22 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) 26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
27{ 27{
28 struct pcie_device *pciedev; 28 struct pcie_device *pciedev;
29 struct pcie_port_data *port_data;
29 struct pcie_port_service_driver *driver; 30 struct pcie_port_service_driver *driver;
30 31
31 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) 32 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
32 return 0; 33 return 0;
33 34
34 pciedev = to_pcie_device(dev); 35 pciedev = to_pcie_device(dev);
35 driver = to_service_driver(drv); 36 driver = to_service_driver(drv);
36 if ( (driver->id_table->vendor != PCI_ANY_ID && 37
37 driver->id_table->vendor != pciedev->id.vendor) || 38 if (driver->service != pciedev->service)
38 (driver->id_table->device != PCI_ANY_ID && 39 return 0;
39 driver->id_table->device != pciedev->id.device) || 40
40 (driver->id_table->port_type != PCIE_ANY_PORT && 41 port_data = pci_get_drvdata(pciedev->port);
41 driver->id_table->port_type != pciedev->id.port_type) || 42
42 driver->id_table->service_type != pciedev->id.service_type ) 43 if (driver->port_type != PCIE_ANY_PORT
44 && driver->port_type != port_data->port_type)
43 return 0; 45 return 0;
44 46
45 return 1; 47 return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 8b3f8c18032f..e39982503863 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,10 +15,9 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
17 17
18#include "../pci.h"
18#include "portdrv.h" 19#include "portdrv.h"
19 20
20extern int pcie_mch_quirk; /* MSI-quirk Indicator */
21
22/** 21/**
23 * release_pcie_device - free PCI Express port service device structure 22 * release_pcie_device - free PCI Express port service device structure
24 * @dev: Port service device to release 23 * @dev: Port service device to release
@@ -31,26 +30,150 @@ static void release_pcie_device(struct device *dev)
31 kfree(to_pcie_device(dev)); 30 kfree(to_pcie_device(dev));
32} 31}
33 32
34static int is_msi_quirked(struct pci_dev *dev) 33/**
34 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
35 * @entries: Array of MSI-X entries
36 * @new_entry: Index of the entry to add to the array
37 * @nr_entries: Number of entries already in the array
38 *
39 * Return value: Position of the added entry in the array
40 */
41static int pcie_port_msix_add_entry(
42 struct msix_entry *entries, int new_entry, int nr_entries)
35{ 43{
36 int port_type, quirk = 0; 44 int j;
45
46 for (j = 0; j < nr_entries; j++)
47 if (entries[j].entry == new_entry)
48 return j;
49
50 entries[j].entry = new_entry;
51 return j;
52}
53
54/**
55 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
56 * @dev: PCI Express port to handle
57 * @vectors: Array of interrupt vectors to populate
58 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
59 *
60 * Return value: 0 on success, error code on failure
61 */
62static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
63{
64 struct msix_entry *msix_entries;
65 int idx[PCIE_PORT_DEVICE_MAXSERVICES];
66 int nr_entries, status, pos, i, nvec;
37 u16 reg16; 67 u16 reg16;
68 u32 reg32;
38 69
39 pci_read_config_word(dev, 70 nr_entries = pci_msix_table_size(dev);
40 pci_find_capability(dev, PCI_CAP_ID_EXP) + 71 if (!nr_entries)
41 PCIE_CAPABILITIES_REG, &reg16); 72 return -EINVAL;
42 port_type = (reg16 >> 4) & PORT_TYPE_MASK; 73 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
43 switch(port_type) { 74 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
44 case PCIE_RC_PORT: 75
45 if (pcie_mch_quirk == 1) 76 msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
46 quirk = 1; 77 if (!msix_entries)
47 break; 78 return -ENOMEM;
48 case PCIE_SW_UPSTREAM_PORT: 79
49 case PCIE_SW_DOWNSTREAM_PORT: 80 /*
50 default: 81 * Allocate as many entries as the port wants, so that we can check
51 break; 82 * which of them will be useful. Moreover, if nr_entries is correctly
83 * equal to the number of entries this port actually uses, we'll happily
84 * go through without any tricks.
85 */
86 for (i = 0; i < nr_entries; i++)
87 msix_entries[i].entry = i;
88
89 status = pci_enable_msix(dev, msix_entries, nr_entries);
90 if (status)
91 goto Exit;
92
93 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
94 idx[i] = -1;
95 status = -EIO;
96 nvec = 0;
97
98 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
99 int entry;
100
101 /*
102 * The code below follows the PCI Express Base Specification 2.0
103 * stating in Section 6.1.6 that "PME and Hot-Plug Event
104 * interrupts (when both are implemented) always share the same
105 * MSI or MSI-X vector, as indicated by the Interrupt Message
106 * Number field in the PCI Express Capabilities register", where
107 * according to Section 7.8.2 of the specification "For MSI-X,
108 * the value in this field indicates which MSI-X Table entry is
109 * used to generate the interrupt message."
110 */
111 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
112 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
113 entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
114 if (entry >= nr_entries)
115 goto Error;
116
117 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
118 if (i == nvec)
119 nvec++;
120
121 idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
122 idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
123 }
124
125 if (mask & PCIE_PORT_SERVICE_AER) {
126 int entry;
127
128 /*
129 * The code below follows Section 7.10.10 of the PCI Express
130 * Base Specification 2.0 stating that bits 31-27 of the Root
131 * Error Status Register contain a value indicating which of the
132 * MSI/MSI-X vectors assigned to the port is going to be used
133 * for AER, where "For MSI-X, the value in this register
134 * indicates which MSI-X Table entry is used to generate the
135 * interrupt message."
136 */
137 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
138 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
139 entry = reg32 >> 27;
140 if (entry >= nr_entries)
141 goto Error;
142
143 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
144 if (i == nvec)
145 nvec++;
146
147 idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
52 } 148 }
53 return quirk; 149
150 /*
151 * If nvec is equal to the allocated number of entries, we can just use
152 * what we have. Otherwise, the port has some extra entries not for the
153 * services we know and we need to work around that.
154 */
155 if (nvec == nr_entries) {
156 status = 0;
157 } else {
158 /* Drop the temporary MSI-X setup */
159 pci_disable_msix(dev);
160
161 /* Now allocate the MSI-X vectors for real */
162 status = pci_enable_msix(dev, msix_entries, nvec);
163 if (status)
164 goto Exit;
165 }
166
167 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
168 vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
169
170 Exit:
171 kfree(msix_entries);
172 return status;
173
174 Error:
175 pci_disable_msix(dev);
176 goto Exit;
54} 177}
55 178
56/** 179/**
@@ -64,47 +187,32 @@ static int is_msi_quirked(struct pci_dev *dev)
64 */ 187 */
65static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
66{ 189{
67 int i, pos, nvec, status = -EINVAL; 190 struct pcie_port_data *port_data = pci_get_drvdata(dev);
68 int interrupt_mode = PCIE_PORT_INTx_MODE; 191 int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
192 int i;
69 193
70 /* Set INTx as default */
71 for (i = 0, nvec = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
72 if (mask & (1 << i))
73 nvec++;
74 vectors[i] = dev->irq;
75 }
76
77 /* Check MSI quirk */ 194 /* Check MSI quirk */
78 if (is_msi_quirked(dev)) 195 if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
79 return interrupt_mode; 196 goto Fallback;
80 197
81 /* Select MSI-X over MSI if supported */ 198 /* Try to use MSI-X if supported */
82 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 199 if (!pcie_port_enable_msix(dev, vectors, mask))
83 if (pos) { 200 return PCIE_PORT_MSIX_MODE;
84 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 201
85 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 202 /* We're not going to use MSI-X, so try MSI and fall back to INTx */
86 status = pci_enable_msix(dev, msix_entries, nvec); 203 if (!pci_enable_msi(dev))
87 if (!status) { 204 interrupt_mode = PCIE_PORT_MSI_MODE;
88 int j = 0; 205
89 206 Fallback:
90 interrupt_mode = PCIE_PORT_MSIX_MODE; 207 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
91 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 208 interrupt_mode = PCIE_PORT_INTx_MODE;
92 if (mask & (1 << i)) 209
93 vectors[i] = msix_entries[j++].vector; 210 irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
94 } 211 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
95 } 212 vectors[i] = irq;
96 } 213
97 if (status) { 214 vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
98 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 215
99 if (pos) {
100 status = pci_enable_msi(dev);
101 if (!status) {
102 interrupt_mode = PCIE_PORT_MSI_MODE;
103 for (i = 0;i < PCIE_PORT_DEVICE_MAXSERVICES;i++)
104 vectors[i] = dev->irq;
105 }
106 }
107 }
108 return interrupt_mode; 216 return interrupt_mode;
109} 217}
110 218
@@ -132,13 +240,11 @@ static int get_port_device_capability(struct pci_dev *dev)
132 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32); 240 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
133 if (reg32 & SLOT_HP_CAPABLE_MASK) 241 if (reg32 & SLOT_HP_CAPABLE_MASK)
134 services |= PCIE_PORT_SERVICE_HP; 242 services |= PCIE_PORT_SERVICE_HP;
135 } 243 }
136 /* PME Capable - root port capability */ 244 /* AER capable */
137 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
138 services |= PCIE_PORT_SERVICE_PME;
139
140 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 245 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
141 services |= PCIE_PORT_SERVICE_AER; 246 services |= PCIE_PORT_SERVICE_AER;
247 /* VC support */
142 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 248 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
143 services |= PCIE_PORT_SERVICE_VC; 249 services |= PCIE_PORT_SERVICE_VC;
144 250
@@ -152,20 +258,17 @@ static int get_port_device_capability(struct pci_dev *dev)
152 * @port_type: Type of the port 258 * @port_type: Type of the port
153 * @service_type: Type of service to associate with the service device 259 * @service_type: Type of service to associate with the service device
154 * @irq: Interrupt vector to associate with the service device 260 * @irq: Interrupt vector to associate with the service device
155 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
156 */ 261 */
157static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 262static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
158 int port_type, int service_type, int irq, int irq_mode) 263 int service_type, int irq)
159{ 264{
265 struct pcie_port_data *port_data = pci_get_drvdata(parent);
160 struct device *device; 266 struct device *device;
267 int port_type = port_data->port_type;
161 268
162 dev->port = parent; 269 dev->port = parent;
163 dev->interrupt_mode = irq_mode;
164 dev->irq = irq; 270 dev->irq = irq;
165 dev->id.vendor = parent->vendor; 271 dev->service = service_type;
166 dev->id.device = parent->device;
167 dev->id.port_type = port_type;
168 dev->id.service_type = (1 << service_type);
169 272
170 /* Initialize generic device interface */ 273 /* Initialize generic device interface */
171 device = &dev->device; 274 device = &dev->device;
@@ -185,10 +288,9 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
185 * @port_type: Type of the port 288 * @port_type: Type of the port
186 * @service_type: Type of service to associate with the service device 289 * @service_type: Type of service to associate with the service device
187 * @irq: Interrupt vector to associate with the service device 290 * @irq: Interrupt vector to associate with the service device
188 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
189 */ 291 */
190static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, 292static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
191 int port_type, int service_type, int irq, int irq_mode) 293 int service_type, int irq)
192{ 294{
193 struct pcie_device *device; 295 struct pcie_device *device;
194 296
@@ -196,7 +298,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
196 if (!device) 298 if (!device)
197 return NULL; 299 return NULL;
198 300
199 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 301 pcie_device_init(parent, device, service_type, irq);
200 return device; 302 return device;
201} 303}
202 304
@@ -230,63 +332,90 @@ int pcie_port_device_probe(struct pci_dev *dev)
230 */ 332 */
231int pcie_port_device_register(struct pci_dev *dev) 333int pcie_port_device_register(struct pci_dev *dev)
232{ 334{
233 struct pcie_port_device_ext *p_ext; 335 struct pcie_port_data *port_data;
234 int status, type, capabilities, irq_mode, i; 336 int status, capabilities, irq_mode, i, nr_serv;
235 int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; 337 int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
236 u16 reg16; 338 u16 reg16;
237 339
238 /* Allocate port device extension */ 340 port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
239 if (!(p_ext = kmalloc(sizeof(struct pcie_port_device_ext), GFP_KERNEL))) 341 if (!port_data)
240 return -ENOMEM; 342 return -ENOMEM;
241 343 pci_set_drvdata(dev, port_data);
242 pci_set_drvdata(dev, p_ext);
243 344
244 /* Get port type */ 345 /* Get port type */
245 pci_read_config_word(dev, 346 pci_read_config_word(dev,
246 pci_find_capability(dev, PCI_CAP_ID_EXP) + 347 pci_find_capability(dev, PCI_CAP_ID_EXP) +
247 PCIE_CAPABILITIES_REG, &reg16); 348 PCIE_CAPABILITIES_REG, &reg16);
248 type = (reg16 >> 4) & PORT_TYPE_MASK; 349 port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
249 350
250 /* Now get port services */
251 capabilities = get_port_device_capability(dev); 351 capabilities = get_port_device_capability(dev);
352 /* Root ports are capable of generating PME too */
353 if (port_data->port_type == PCIE_RC_PORT)
354 capabilities |= PCIE_PORT_SERVICE_PME;
355
252 irq_mode = assign_interrupt_mode(dev, vectors, capabilities); 356 irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
253 p_ext->interrupt_mode = irq_mode; 357 if (irq_mode == PCIE_PORT_NO_IRQ) {
358 /*
359 * Don't use service devices that require interrupts if there is
360 * no way to generate them.
361 */
362 if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
363 status = -ENODEV;
364 goto Error;
365 }
366 capabilities = PCIE_PORT_SERVICE_VC;
367 }
368 port_data->port_irq_mode = irq_mode;
369
370 status = pci_enable_device(dev);
371 if (status)
372 goto Error;
373 pci_set_master(dev);
254 374
255 /* Allocate child services if any */ 375 /* Allocate child services if any */
256 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 376 for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
257 struct pcie_device *child; 377 struct pcie_device *child;
378 int service = 1 << i;
379
380 if (!(capabilities & service))
381 continue;
258 382
259 if (capabilities & (1 << i)) { 383 child = alloc_pcie_device(dev, service, vectors[i]);
260 child = alloc_pcie_device( 384 if (!child)
261 dev, /* parent */ 385 continue;
262 type, /* port type */ 386
263 i, /* service type */ 387 status = device_register(&child->device);
264 vectors[i], /* irq */ 388 if (status) {
265 irq_mode /* interrupt mode */); 389 kfree(child);
266 if (child) { 390 continue;
267 status = device_register(&child->device);
268 if (status) {
269 kfree(child);
270 continue;
271 }
272 get_device(&child->device);
273 }
274 } 391 }
392
393 get_device(&child->device);
394 nr_serv++;
395 }
396 if (!nr_serv) {
397 pci_disable_device(dev);
398 status = -ENODEV;
399 goto Error;
275 } 400 }
401
276 return 0; 402 return 0;
403
404 Error:
405 kfree(port_data);
406 return status;
277} 407}
278 408
279#ifdef CONFIG_PM 409#ifdef CONFIG_PM
280static int suspend_iter(struct device *dev, void *data) 410static int suspend_iter(struct device *dev, void *data)
281{ 411{
282 struct pcie_port_service_driver *service_driver; 412 struct pcie_port_service_driver *service_driver;
283 pm_message_t state = * (pm_message_t *) data;
284 413
285 if ((dev->bus == &pcie_port_bus_type) && 414 if ((dev->bus == &pcie_port_bus_type) &&
286 (dev->driver)) { 415 (dev->driver)) {
287 service_driver = to_service_driver(dev->driver); 416 service_driver = to_service_driver(dev->driver);
288 if (service_driver->suspend) 417 if (service_driver->suspend)
289 service_driver->suspend(to_pcie_device(dev), state); 418 service_driver->suspend(to_pcie_device(dev));
290 } 419 }
291 return 0; 420 return 0;
292} 421}
@@ -294,11 +423,10 @@ static int suspend_iter(struct device *dev, void *data)
294/** 423/**
295 * pcie_port_device_suspend - suspend port services associated with a PCIe port 424 * pcie_port_device_suspend - suspend port services associated with a PCIe port
296 * @dev: PCI Express port to handle 425 * @dev: PCI Express port to handle
297 * @state: Representation of system power management transition in progress
298 */ 426 */
299int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) 427int pcie_port_device_suspend(struct device *dev)
300{ 428{
301 return device_for_each_child(&dev->dev, &state, suspend_iter); 429 return device_for_each_child(dev, NULL, suspend_iter);
302} 430}
303 431
304static int resume_iter(struct device *dev, void *data) 432static int resume_iter(struct device *dev, void *data)
@@ -318,24 +446,17 @@ static int resume_iter(struct device *dev, void *data)
318 * pcie_port_device_resume - resume port services associated with a PCIe port 446 * pcie_port_device_resume - resume port services associated with a PCIe port
319 * @dev: PCI Express port to handle 447 * @dev: PCI Express port to handle
320 */ 448 */
321int pcie_port_device_resume(struct pci_dev *dev) 449int pcie_port_device_resume(struct device *dev)
322{ 450{
323 return device_for_each_child(&dev->dev, NULL, resume_iter); 451 return device_for_each_child(dev, NULL, resume_iter);
324} 452}
325#endif 453#endif /* PM */
326 454
327static int remove_iter(struct device *dev, void *data) 455static int remove_iter(struct device *dev, void *data)
328{ 456{
329 struct pcie_port_service_driver *service_driver;
330
331 if (dev->bus == &pcie_port_bus_type) { 457 if (dev->bus == &pcie_port_bus_type) {
332 if (dev->driver) { 458 put_device(dev);
333 service_driver = to_service_driver(dev->driver); 459 device_unregister(dev);
334 if (service_driver->remove)
335 service_driver->remove(to_pcie_device(dev));
336 }
337 *(unsigned long*)data = (unsigned long)dev;
338 return 1;
339 } 460 }
340 return 0; 461 return 0;
341} 462}
@@ -349,25 +470,21 @@ static int remove_iter(struct device *dev, void *data)
349 */ 470 */
350void pcie_port_device_remove(struct pci_dev *dev) 471void pcie_port_device_remove(struct pci_dev *dev)
351{ 472{
352 struct device *device; 473 struct pcie_port_data *port_data = pci_get_drvdata(dev);
353 unsigned long device_addr;
354 int interrupt_mode = PCIE_PORT_INTx_MODE;
355 int status;
356 474
357 do { 475 device_for_each_child(&dev->dev, NULL, remove_iter);
358 status = device_for_each_child(&dev->dev, &device_addr, remove_iter); 476 pci_disable_device(dev);
359 if (status) { 477
360 device = (struct device*)device_addr; 478 switch (port_data->port_irq_mode) {
361 interrupt_mode = (to_pcie_device(device))->interrupt_mode; 479 case PCIE_PORT_MSIX_MODE:
362 put_device(device);
363 device_unregister(device);
364 }
365 } while (status);
366 /* Switch to INTx by default if MSI enabled */
367 if (interrupt_mode == PCIE_PORT_MSIX_MODE)
368 pci_disable_msix(dev); 480 pci_disable_msix(dev);
369 else if (interrupt_mode == PCIE_PORT_MSI_MODE) 481 break;
482 case PCIE_PORT_MSI_MODE:
370 pci_disable_msi(dev); 483 pci_disable_msi(dev);
484 break;
485 }
486
487 kfree(port_data);
371} 488}
372 489
373/** 490/**
@@ -392,7 +509,7 @@ static int pcie_port_probe_service(struct device *dev)
392 return -ENODEV; 509 return -ENODEV;
393 510
394 pciedev = to_pcie_device(dev); 511 pciedev = to_pcie_device(dev);
395 status = driver->probe(pciedev, driver->id_table); 512 status = driver->probe(pciedev);
396 if (!status) { 513 if (!status) {
397 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 514 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
398 driver->name); 515 driver->name);
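The two config-space reads in pcie_port_enable_msix() above recover the MSI-X table entry used for each service from the spec-defined Interrupt Message Number fields. A small worked example of the same bit extractions, with hypothetical register values:

#include <stdio.h>
#include <stdint.h>

#define PCIE_PORT_MSI_VECTOR_MASK	0x1f

int main(void)
{
	uint16_t pcie_flags = 0x0242;		/* PCIe Capabilities register */
	uint32_t root_status = 0x18000000;	/* AER Root Error Status */

	/* bits 13:9: Interrupt Message Number (shared by PME and Hot-Plug) */
	printf("PME/HP MSI-X entry: %u\n",
	       (unsigned)((pcie_flags >> 9) & PCIE_PORT_MSI_VECTOR_MASK));

	/* bits 31:27: Advanced Error Interrupt Message Number (AER) */
	printf("AER MSI-X entry: %u\n", (unsigned)(root_status >> 27));
	return 0;
}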
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 5ea566e20b37..b924e2463f85 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -32,11 +32,6 @@ MODULE_LICENSE("GPL");
32/* global data */ 32/* global data */
33static const char device_name[] = "pcieport-driver"; 33static const char device_name[] = "pcieport-driver";
34 34
35static int pcie_portdrv_save_config(struct pci_dev *dev)
36{
37 return pci_save_state(dev);
38}
39
40static int pcie_portdrv_restore_config(struct pci_dev *dev) 35static int pcie_portdrv_restore_config(struct pci_dev *dev)
41{ 36{
42 int retval; 37 int retval;
@@ -49,21 +44,21 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
49} 44}
50 45
51#ifdef CONFIG_PM 46#ifdef CONFIG_PM
52static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) 47static struct dev_pm_ops pcie_portdrv_pm_ops = {
53{ 48 .suspend = pcie_port_device_suspend,
54 return pcie_port_device_suspend(dev, state); 49 .resume = pcie_port_device_resume,
50 .freeze = pcie_port_device_suspend,
51 .thaw = pcie_port_device_resume,
52 .poweroff = pcie_port_device_suspend,
53 .restore = pcie_port_device_resume,
54};
55 55
56} 56#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
57 57
58static int pcie_portdrv_resume(struct pci_dev *dev) 58#else /* !PM */
59{ 59
60 pci_set_master(dev); 60#define PCIE_PORTDRV_PM_OPS NULL
61 return pcie_port_device_resume(dev); 61#endif /* !PM */
62}
63#else
64#define pcie_portdrv_suspend NULL
65#define pcie_portdrv_resume NULL
66#endif
67 62
68/* 63/*
69 * pcie_portdrv_probe - Probe PCI-Express port devices 64 * pcie_portdrv_probe - Probe PCI-Express port devices
@@ -82,20 +77,15 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
82 if (status) 77 if (status)
83 return status; 78 return status;
84 79
85 if (pci_enable_device(dev) < 0)
86 return -ENODEV;
87
88 pci_set_master(dev);
89 if (!dev->irq && dev->pin) { 80 if (!dev->irq && dev->pin) {
90 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 81 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
91 "check vendor BIOS\n", dev->vendor, dev->device); 82 "check vendor BIOS\n", dev->vendor, dev->device);
92 } 83 }
93 if (pcie_port_device_register(dev)) { 84 status = pcie_port_device_register(dev);
94 pci_disable_device(dev); 85 if (status)
95 return -ENOMEM; 86 return status;
96 }
97 87
98 pcie_portdrv_save_config(dev); 88 pci_save_state(dev);
99 89
100 return 0; 90 return 0;
101} 91}
@@ -104,7 +94,6 @@ static void pcie_portdrv_remove (struct pci_dev *dev)
104{ 94{
105 pcie_port_device_remove(dev); 95 pcie_port_device_remove(dev);
106 pci_disable_device(dev); 96 pci_disable_device(dev);
107 kfree(pci_get_drvdata(dev));
108} 97}
109 98
110static int error_detected_iter(struct device *device, void *data) 99static int error_detected_iter(struct device *device, void *data)
@@ -278,10 +267,9 @@ static struct pci_driver pcie_portdriver = {
278 .probe = pcie_portdrv_probe, 267 .probe = pcie_portdrv_probe,
279 .remove = pcie_portdrv_remove, 268 .remove = pcie_portdrv_remove,
280 269
281 .suspend = pcie_portdrv_suspend,
282 .resume = pcie_portdrv_resume,
283
284 .err_handler = &pcie_portdrv_err_handler, 270 .err_handler = &pcie_portdrv_err_handler,
271
272 .driver.pm = PCIE_PORTDRV_PM_OPS,
285}; 273};
286 274
287static int __init pcie_portdrv_init(void) 275static int __init pcie_portdrv_init(void)
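The portdrv PM conversion above replaces the legacy pci_driver .suspend/.resume pair with a struct dev_pm_ops wired up through .driver.pm. A hedged sketch of the same pattern for an arbitrary PCI driver follows; all names are hypothetical.

#include <linux/pci.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* quiesce the device here */
}

static int example_resume(struct device *dev)
{
	return 0;	/* re-initialise the device here */
}

static struct dev_pm_ops example_pm_ops = {
	.suspend	= example_suspend,
	.resume		= example_resume,
	.freeze		= example_suspend,
	.thaw		= example_resume,
	.poweroff	= example_suspend,
	.restore	= example_resume,
};

static struct pci_driver example_driver = {
	.name		= "example",
	/* .id_table, .probe and .remove elided */
	.driver.pm	= &example_pm_ops,
};

/* registered with pci_register_driver(&example_driver) as usual */

Reusing the same suspend/resume callbacks for freeze/thaw and poweroff/restore, as the port driver does, is the common choice when the driver has no hibernation-specific work to do.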
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 55ec44a27e89..e2f3dd098cfa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -287,7 +287,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
287 struct resource *res; 287 struct resource *res;
288 int i; 288 int i;
289 289
290 if (!dev) /* It's a host bus, nothing to read */ 290 if (!child->parent) /* It's a host bus, nothing to read */
291 return; 291 return;
292 292
293 if (dev->transparent) { 293 if (dev->transparent) {
@@ -511,21 +511,21 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
511 511
512 /* 512 /*
513 * If we already got to this bus through a different bridge, 513 * If we already got to this bus through a different bridge,
514 * ignore it. This can happen with the i450NX chipset. 514 * don't re-add it. This can happen with the i450NX chipset.
515 *
516 * However, we continue to descend the hierarchy and
517 * scan the remaining child buses.
515 */ 518 */
516 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 519 child = pci_find_bus(pci_domain_nr(bus), busnr);
517 dev_info(&dev->dev, "bus %04x:%02x already known\n", 520 if (!child) {
518 pci_domain_nr(bus), busnr); 521 child = pci_add_new_bus(bus, dev, busnr);
519 goto out; 522 if (!child)
523 goto out;
524 child->primary = buses & 0xFF;
525 child->subordinate = (buses >> 16) & 0xFF;
526 child->bridge_ctl = bctl;
520 } 527 }
521 528
522 child = pci_add_new_bus(bus, dev, busnr);
523 if (!child)
524 goto out;
525 child->primary = buses & 0xFF;
526 child->subordinate = (buses >> 16) & 0xFF;
527 child->bridge_ctl = bctl;
528
529 cmax = pci_scan_child_bus(child); 529 cmax = pci_scan_child_bus(child);
530 if (cmax > max) 530 if (cmax > max)
531 max = cmax; 531 max = cmax;
@@ -674,6 +674,19 @@ static void pci_read_irq(struct pci_dev *dev)
674 dev->irq = irq; 674 dev->irq = irq;
675} 675}
676 676
677static void set_pcie_port_type(struct pci_dev *pdev)
678{
679 int pos;
680 u16 reg16;
681
682 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
683 if (!pos)
684 return;
685 pdev->is_pcie = 1;
686 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
687 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
688}
689
677#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 690#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
678 691
679/** 692/**
@@ -683,12 +696,33 @@ static void pci_read_irq(struct pci_dev *dev)
683 * Initialize the device structure with information about the device's 696 * Initialize the device structure with information about the device's
684 * vendor,class,memory and IO-space addresses,IRQ lines etc. 697 * vendor,class,memory and IO-space addresses,IRQ lines etc.
685 * Called at initialisation of the PCI subsystem and by CardBus services. 698 * Called at initialisation of the PCI subsystem and by CardBus services.
686 * Returns 0 on success and -1 if unknown type of device (not normal, bridge 699 * Returns 0 on success and negative if unknown type of device (not normal,
687 * or CardBus). 700 * bridge or CardBus).
688 */ 701 */
689static int pci_setup_device(struct pci_dev * dev) 702int pci_setup_device(struct pci_dev *dev)
690{ 703{
691 u32 class; 704 u32 class;
705 u8 hdr_type;
706 struct pci_slot *slot;
707
708 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
709 return -EIO;
710
711 dev->sysdata = dev->bus->sysdata;
712 dev->dev.parent = dev->bus->bridge;
713 dev->dev.bus = &pci_bus_type;
714 dev->hdr_type = hdr_type & 0x7f;
715 dev->multifunction = !!(hdr_type & 0x80);
716 dev->error_state = pci_channel_io_normal;
717 set_pcie_port_type(dev);
718
719 list_for_each_entry(slot, &dev->bus->slots, list)
720 if (PCI_SLOT(dev->devfn) == slot->number)
721 dev->slot = slot;
722
723 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
724 set this higher, assuming the system even supports it. */
725 dev->dma_mask = 0xffffffff;
692 726
693 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 727 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
694 dev->bus->number, PCI_SLOT(dev->devfn), 728 dev->bus->number, PCI_SLOT(dev->devfn),
@@ -703,12 +737,14 @@ static int pci_setup_device(struct pci_dev * dev)
703 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", 737 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
704 dev->vendor, dev->device, class, dev->hdr_type); 738 dev->vendor, dev->device, class, dev->hdr_type);
705 739
740 /* need to have dev->class ready */
741 dev->cfg_size = pci_cfg_space_size(dev);
742
706 /* "Unknown power state" */ 743 /* "Unknown power state" */
707 dev->current_state = PCI_UNKNOWN; 744 dev->current_state = PCI_UNKNOWN;
708 745
709 /* Early fixups, before probing the BARs */ 746 /* Early fixups, before probing the BARs */
710 pci_fixup_device(pci_fixup_early, dev); 747 pci_fixup_device(pci_fixup_early, dev);
711 class = dev->class >> 8;
712 748
713 switch (dev->hdr_type) { /* header type */ 749 switch (dev->hdr_type) { /* header type */
714 case PCI_HEADER_TYPE_NORMAL: /* standard header */ 750 case PCI_HEADER_TYPE_NORMAL: /* standard header */
@@ -770,7 +806,7 @@ static int pci_setup_device(struct pci_dev * dev)
770 default: /* unknown header */ 806 default: /* unknown header */
771 dev_err(&dev->dev, "unknown header type %02x, " 807 dev_err(&dev->dev, "unknown header type %02x, "
772 "ignoring device\n", dev->hdr_type); 808 "ignoring device\n", dev->hdr_type);
773 return -1; 809 return -EIO;
774 810
775 bad: 811 bad:
776 dev_err(&dev->dev, "ignoring class %02x (doesn't match header " 812 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
@@ -785,6 +821,7 @@ static int pci_setup_device(struct pci_dev * dev)
785static void pci_release_capabilities(struct pci_dev *dev) 821static void pci_release_capabilities(struct pci_dev *dev)
786{ 822{
787 pci_vpd_release(dev); 823 pci_vpd_release(dev);
824 pci_iov_release(dev);
788} 825}
789 826
790/** 827/**
@@ -803,19 +840,6 @@ static void pci_release_dev(struct device *dev)
803 kfree(pci_dev); 840 kfree(pci_dev);
804} 841}
805 842
806static void set_pcie_port_type(struct pci_dev *pdev)
807{
808 int pos;
809 u16 reg16;
810
811 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
812 if (!pos)
813 return;
814 pdev->is_pcie = 1;
815 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
816 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
817}
818
819/** 843/**
820 * pci_cfg_space_size - get the configuration space size of the PCI device. 844 * pci_cfg_space_size - get the configuration space size of the PCI device.
821 * @dev: PCI device 845 * @dev: PCI device
@@ -847,6 +871,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
847{ 871{
848 int pos; 872 int pos;
849 u32 status; 873 u32 status;
874 u16 class;
875
876 class = dev->class >> 8;
877 if (class == PCI_CLASS_BRIDGE_HOST)
878 return pci_cfg_space_size_ext(dev);
850 879
851 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 880 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
852 if (!pos) { 881 if (!pos) {
@@ -891,9 +920,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
891static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 920static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
892{ 921{
893 struct pci_dev *dev; 922 struct pci_dev *dev;
894 struct pci_slot *slot;
895 u32 l; 923 u32 l;
896 u8 hdr_type;
897 int delay = 1; 924 int delay = 1;
898 925
899 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) 926 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
@@ -920,34 +947,16 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
920 } 947 }
921 } 948 }
922 949
923 if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
924 return NULL;
925
926 dev = alloc_pci_dev(); 950 dev = alloc_pci_dev();
927 if (!dev) 951 if (!dev)
928 return NULL; 952 return NULL;
929 953
930 dev->bus = bus; 954 dev->bus = bus;
931 dev->sysdata = bus->sysdata;
932 dev->dev.parent = bus->bridge;
933 dev->dev.bus = &pci_bus_type;
934 dev->devfn = devfn; 955 dev->devfn = devfn;
935 dev->hdr_type = hdr_type & 0x7f;
936 dev->multifunction = !!(hdr_type & 0x80);
937 dev->vendor = l & 0xffff; 956 dev->vendor = l & 0xffff;
938 dev->device = (l >> 16) & 0xffff; 957 dev->device = (l >> 16) & 0xffff;
939 dev->cfg_size = pci_cfg_space_size(dev);
940 dev->error_state = pci_channel_io_normal;
941 set_pcie_port_type(dev);
942
943 list_for_each_entry(slot, &bus->slots, list)
944 if (PCI_SLOT(devfn) == slot->number)
945 dev->slot = slot;
946 958
947 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 959 if (pci_setup_device(dev)) {
948 set this higher, assuming the system even supports it. */
949 dev->dma_mask = 0xffffffff;
950 if (pci_setup_device(dev) < 0) {
951 kfree(dev); 960 kfree(dev);
952 return NULL; 961 return NULL;
953 } 962 }
@@ -972,6 +981,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
972 981
973 /* Alternative Routing-ID Forwarding */ 982 /* Alternative Routing-ID Forwarding */
974 pci_enable_ari(dev); 983 pci_enable_ari(dev);
984
985 /* Single Root I/O Virtualization */
986 pci_iov_init(dev);
975} 987}
976 988
977void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 989void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1006,6 +1018,12 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1006{ 1018{
1007 struct pci_dev *dev; 1019 struct pci_dev *dev;
1008 1020
1021 dev = pci_get_slot(bus, devfn);
1022 if (dev) {
1023 pci_dev_put(dev);
1024 return dev;
1025 }
1026
1009 dev = pci_scan_device(bus, devfn); 1027 dev = pci_scan_device(bus, devfn);
1010 if (!dev) 1028 if (!dev)
1011 return NULL; 1029 return NULL;
@@ -1024,35 +1042,27 @@ EXPORT_SYMBOL(pci_scan_single_device);
1024 * Scan a PCI slot on the specified PCI bus for devices, adding 1042 * Scan a PCI slot on the specified PCI bus for devices, adding
1025 * discovered devices to the @bus->devices list. New devices 1043 * discovered devices to the @bus->devices list. New devices
1026 * will not have is_added set. 1044 * will not have is_added set.
1045 *
1046 * Returns the number of new devices found.
1027 */ 1047 */
1028int pci_scan_slot(struct pci_bus *bus, int devfn) 1048int pci_scan_slot(struct pci_bus *bus, int devfn)
1029{ 1049{
1030 int func, nr = 0; 1050 int fn, nr = 0;
1031 int scan_all_fns; 1051 struct pci_dev *dev;
1032
1033 scan_all_fns = pcibios_scan_all_fns(bus, devfn);
1034
1035 for (func = 0; func < 8; func++, devfn++) {
1036 struct pci_dev *dev;
1037
1038 dev = pci_scan_single_device(bus, devfn);
1039 if (dev) {
1040 nr++;
1041 1052
1042 /* 1053 dev = pci_scan_single_device(bus, devfn);
1043 * If this is a single function device, 1054 if (dev && !dev->is_added) /* new device? */
1044 * don't scan past the first function. 1055 nr++;
1045 */ 1056
1046 if (!dev->multifunction) { 1057 if ((dev && dev->multifunction) ||
1047 if (func > 0) { 1058 (!dev && pcibios_scan_all_fns(bus, devfn))) {
1048 dev->multifunction = 1; 1059 for (fn = 1; fn < 8; fn++) {
1049 } else { 1060 dev = pci_scan_single_device(bus, devfn + fn);
1050 break; 1061 if (dev) {
1051 } 1062 if (!dev->is_added)
1063 nr++;
1064 dev->multifunction = 1;
1052 } 1065 }
1053 } else {
1054 if (func == 0 && !scan_all_fns)
1055 break;
1056 } 1066 }
1057 } 1067 }
1058 1068
@@ -1074,12 +1084,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1074 for (devfn = 0; devfn < 0x100; devfn += 8) 1084 for (devfn = 0; devfn < 0x100; devfn += 8)
1075 pci_scan_slot(bus, devfn); 1085 pci_scan_slot(bus, devfn);
1076 1086
1087 /* Reserve buses for SR-IOV capability. */
1088 max += pci_iov_bus_range(bus);
1089
1077 /* 1090 /*
1078 * After performing arch-dependent fixup of the bus, look behind 1091 * After performing arch-dependent fixup of the bus, look behind
1079 * all PCI-to-PCI bridges on this bus. 1092 * all PCI-to-PCI bridges on this bus.
1080 */ 1093 */
1081 pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number); 1094 if (!bus->is_added) {
1082 pcibios_fixup_bus(bus); 1095 pr_debug("PCI: Fixups for bus %04x:%02x\n",
1096 pci_domain_nr(bus), bus->number);
1097 pcibios_fixup_bus(bus);
1098 if (pci_is_root_bus(bus))
1099 bus->is_added = 1;
1100 }
1101
1083 for (pass=0; pass < 2; pass++) 1102 for (pass=0; pass < 2; pass++)
1084 list_for_each_entry(dev, &bus->devices, bus_list) { 1103 list_for_each_entry(dev, &bus->devices, bus_list) {
1085 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 1104 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
@@ -1114,7 +1133,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
1114 if (!b) 1133 if (!b)
1115 return NULL; 1134 return NULL;
1116 1135
1117 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 1136 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1118 if (!dev){ 1137 if (!dev){
1119 kfree(b); 1138 kfree(b);
1120 return NULL; 1139 return NULL;
@@ -1133,7 +1152,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
1133 list_add_tail(&b->node, &pci_root_buses); 1152 list_add_tail(&b->node, &pci_root_buses);
1134 up_write(&pci_bus_sem); 1153 up_write(&pci_bus_sem);
1135 1154
1136 memset(dev, 0, sizeof(*dev));
1137 dev->parent = parent; 1155 dev->parent = parent;
1138 dev->release = pci_release_bus_bridge_dev; 1156 dev->release = pci_release_bus_bridge_dev;
1139 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); 1157 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
@@ -1193,6 +1211,38 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1193EXPORT_SYMBOL(pci_scan_bus_parented); 1211EXPORT_SYMBOL(pci_scan_bus_parented);
1194 1212
1195#ifdef CONFIG_HOTPLUG 1213#ifdef CONFIG_HOTPLUG
1214/**
1215 * pci_rescan_bus - scan a PCI bus for devices.
1216 * @bus: PCI bus to scan
1217 *
1218 * Scan a PCI bus and its child buses for new devices, add them,
1219 * and enable them.
1220 *
1221 * Returns the highest subordinate bus number discovered.
1222 */
1223unsigned int __devinit pci_rescan_bus(struct pci_bus *bus)
1224{
1225 unsigned int max;
1226 struct pci_dev *dev;
1227
1228 max = pci_scan_child_bus(bus);
1229
1230 down_read(&pci_bus_sem);
1231 list_for_each_entry(dev, &bus->devices, bus_list)
1232 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1233 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1234 if (dev->subordinate)
1235 pci_bus_size_bridges(dev->subordinate);
1236 up_read(&pci_bus_sem);
1237
1238 pci_bus_assign_resources(bus);
1239 pci_enable_bridges(bus);
1240 pci_bus_add_devices(bus);
1241
1242 return max;
1243}
1244EXPORT_SYMBOL_GPL(pci_rescan_bus);
1245
1196EXPORT_SYMBOL(pci_add_new_bus); 1246EXPORT_SYMBOL(pci_add_new_bus);
1197EXPORT_SYMBOL(pci_scan_slot); 1247EXPORT_SYMBOL(pci_scan_slot);
1198EXPORT_SYMBOL(pci_scan_bridge); 1248EXPORT_SYMBOL(pci_scan_bridge);
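pci_rescan_bus() above scans for new devices, sizes bridge windows, assigns resources and adds the devices; it is exported for use by PCI hotplug code. A minimal, hypothetical caller (the function name and the hard-coded domain/bus are purely illustrative):

#include <linux/pci.h>

static void example_rescan(void)
{
	struct pci_bus *bus;

	/* domain 0, bus 0 chosen only for illustration */
	bus = pci_find_bus(0, 0);
	if (!bus)
		return;

	pci_rescan_bus(bus);
}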
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 92b9efe9bcaf..9b2f0d96900d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/dmi.h> 25#include <linux/dmi.h>
26#include <linux/pci-aspm.h> 26#include <linux/pci-aspm.h>
27#include <linux/ioport.h>
27#include "pci.h" 28#include "pci.h"
28 29
29int isa_dma_bridge_buggy; 30int isa_dma_bridge_buggy;
@@ -34,6 +35,65 @@ int pcie_mch_quirk;
34EXPORT_SYMBOL(pcie_mch_quirk); 35EXPORT_SYMBOL(pcie_mch_quirk);
35 36
36#ifdef CONFIG_PCI_QUIRKS 37#ifdef CONFIG_PCI_QUIRKS
38/*
39 * This quirk function disables the device and releases the resources
40 * specified by the kernel boot parameter 'pci=resource_alignment='.
41 * It also rounds up the resource size to the specified alignment.
42 * Later on, the kernel will assign page-aligned memory resources back
43 * to the device.
44 */
45static void __devinit quirk_resource_alignment(struct pci_dev *dev)
46{
47 int i;
48 struct resource *r;
49 resource_size_t align, size;
50
51 if (!pci_is_reassigndev(dev))
52 return;
53
54 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
55 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
56 dev_warn(&dev->dev,
57 "Can't reassign resources to host bridge.\n");
58 return;
59 }
60
61 dev_info(&dev->dev, "Disabling device and releasing resources.\n");
62 pci_disable_device(dev);
63
64 align = pci_specified_resource_alignment(dev);
65 for (i=0; i < PCI_BRIDGE_RESOURCES; i++) {
66 r = &dev->resource[i];
67 if (!(r->flags & IORESOURCE_MEM))
68 continue;
69 size = resource_size(r);
70 if (size < align) {
71 size = align;
72 dev_info(&dev->dev,
73 "Rounding up size of resource #%d to %#llx.\n",
74 i, (unsigned long long)size);
75 }
76 r->end = size - 1;
77 r->start = 0;
78 }
79 /* Need to disable bridge's resource window,
80 * to enable the kernel to reassign new resource
81 * window later on.
82 */
83 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
84 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
85 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
86 r = &dev->resource[i];
87 if (!(r->flags & IORESOURCE_MEM))
88 continue;
89 r->end = resource_size(r) - 1;
90 r->start = 0;
91 }
92 pci_disable_bridge_window(dev);
93 }
94}
95DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
96
37/* The Mellanox Tavor device gives false positive parity errors 97/* The Mellanox Tavor device gives false positive parity errors
38 * Mark this device with a broken_parity_status, to allow 98 * Mark this device with a broken_parity_status, to allow
39 * PCI scanning code to "skip" this now blacklisted device. 99 * PCI scanning code to "skip" this now blacklisted device.
@@ -1126,10 +1186,15 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1126 * its on-board VGA controller */ 1186 * its on-board VGA controller */
1127 asus_hides_smbus = 1; 1187 asus_hides_smbus = 1;
1128 } 1188 }
1129 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG) 1189 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1130 switch(dev->subsystem_device) { 1190 switch(dev->subsystem_device) {
1131 case 0x00b8: /* Compaq Evo D510 CMT */ 1191 case 0x00b8: /* Compaq Evo D510 CMT */
1132 case 0x00b9: /* Compaq Evo D510 SFF */ 1192 case 0x00b9: /* Compaq Evo D510 SFF */
1193 /* The motherboard doesn't have host bridge
1194 * subvendor/subdevice IDs, and the on-board VGA
1195 * controller is disabled if an AGP card is
1196 * inserted, so check USB UHCI
1197 * Controller #1 instead */
1133 asus_hides_smbus = 1; 1198 asus_hides_smbus = 1;
1134 } 1199 }
1135 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) 1200 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
@@ -1154,7 +1219,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
1154DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); 1219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1155 1220
1156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); 1221DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1157DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge); 1222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
1158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); 1223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1159 1224
1160static void asus_hides_smbus_lpc(struct pci_dev *dev) 1225static void asus_hides_smbus_lpc(struct pci_dev *dev)
@@ -1664,9 +1729,13 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1664 * of parallel ports and <S> is the number of serial ports. 1729 * of parallel ports and <S> is the number of serial ports.
1665 */ 1730 */
1666 switch (dev->device) { 1731 switch (dev->device) {
1732 case PCI_DEVICE_ID_NETMOS_9835:
1733 /* Well, this rule doesn't hold for the following 9835 device */
1734 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
1735 dev->subsystem_device == 0x0299)
1736 return;
1667 case PCI_DEVICE_ID_NETMOS_9735: 1737 case PCI_DEVICE_ID_NETMOS_9735:
1668 case PCI_DEVICE_ID_NETMOS_9745: 1738 case PCI_DEVICE_ID_NETMOS_9745:
1669 case PCI_DEVICE_ID_NETMOS_9835:
1670 case PCI_DEVICE_ID_NETMOS_9845: 1739 case PCI_DEVICE_ID_NETMOS_9845:
1671 case PCI_DEVICE_ID_NETMOS_9855: 1740 case PCI_DEVICE_ID_NETMOS_9855:
1672 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && 1741 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
@@ -2078,6 +2147,92 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2078 PCI_DEVICE_ID_NVIDIA_NVENET_15, 2147 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2079 nvenet_msi_disable); 2148 nvenet_msi_disable);
2080 2149
2150static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
2151{
2152 int pos, ttl = 48;
2153 int found = 0;
2154
2155 /* check if there is HT MSI cap or enabled on this device */
2156 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2157 while (pos && ttl--) {
2158 u8 flags;
2159
2160 if (found < 1)
2161 found = 1;
2162 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2163 &flags) == 0) {
2164 if (flags & HT_MSI_FLAGS_ENABLE) {
2165 if (found < 2) {
2166 found = 2;
2167 break;
2168 }
2169 }
2170 }
2171 pos = pci_find_next_ht_capability(dev, pos,
2172 HT_CAPTYPE_MSI_MAPPING);
2173 }
2174
2175 return found;
2176}
2177
2178static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
2179{
2180 struct pci_dev *dev;
2181 int pos;
2182 int i, dev_no;
2183 int found = 0;
2184
2185 dev_no = host_bridge->devfn >> 3;
2186 for (i = dev_no + 1; i < 0x20; i++) {
2187 dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2188 if (!dev)
2189 continue;
2190
2191 /* found the next host bridge? */
2192 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2193 if (pos != 0) {
2194 pci_dev_put(dev);
2195 break;
2196 }
2197
2198 if (ht_check_msi_mapping(dev)) {
2199 found = 1;
2200 pci_dev_put(dev);
2201 break;
2202 }
2203 pci_dev_put(dev);
2204 }
2205
2206 return found;
2207}
2208
 2209#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control 0 */
 2210#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control 1 */
2211
2212static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
2213{
2214 int pos, ctrl_off;
2215 int end = 0;
2216 u16 flags, ctrl;
2217
2218 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2219
2220 if (!pos)
2221 goto out;
2222
2223 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2224
2225 ctrl_off = ((flags >> 10) & 1) ?
2226 PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2227 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2228
2229 if (ctrl & (1 << 6))
2230 end = 1;
2231
2232out:
2233 return end;
2234}
2235
2081static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev) 2236static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2082{ 2237{
2083 struct pci_dev *host_bridge; 2238 struct pci_dev *host_bridge;
@@ -2102,6 +2257,11 @@ static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2102 if (!found) 2257 if (!found)
2103 return; 2258 return;
2104 2259
 2260 /* if dev is a host bridge at the end of the HT chain with leaf devices, don't enable it here */
2261 if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
2262 host_bridge_with_leaf(host_bridge))
2263 goto out;
2264
2105 /* root did that ! */ 2265 /* root did that ! */
2106 if (msi_ht_cap_enabled(host_bridge)) 2266 if (msi_ht_cap_enabled(host_bridge))
2107 goto out; 2267 goto out;
@@ -2132,44 +2292,12 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
2132 } 2292 }
2133} 2293}
2134 2294
2135static int __devinit ht_check_msi_mapping(struct pci_dev *dev) 2295static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2136{
2137 int pos, ttl = 48;
2138 int found = 0;
2139
2140 /* check if there is HT MSI cap or enabled on this device */
2141 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2142 while (pos && ttl--) {
2143 u8 flags;
2144
2145 if (found < 1)
2146 found = 1;
2147 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2148 &flags) == 0) {
2149 if (flags & HT_MSI_FLAGS_ENABLE) {
2150 if (found < 2) {
2151 found = 2;
2152 break;
2153 }
2154 }
2155 }
2156 pos = pci_find_next_ht_capability(dev, pos,
2157 HT_CAPTYPE_MSI_MAPPING);
2158 }
2159
2160 return found;
2161}
2162
2163static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2164{ 2296{
2165 struct pci_dev *host_bridge; 2297 struct pci_dev *host_bridge;
2166 int pos; 2298 int pos;
2167 int found; 2299 int found;
2168 2300
2169 /* Enabling HT MSI mapping on this device breaks MCP51 */
2170 if (dev->device == 0x270)
2171 return;
2172
2173 /* check if there is HT MSI cap or enabled on this device */ 2301 /* check if there is HT MSI cap or enabled on this device */
2174 found = ht_check_msi_mapping(dev); 2302 found = ht_check_msi_mapping(dev);
2175 2303
@@ -2193,7 +2321,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2193 /* Host bridge is to HT */ 2321 /* Host bridge is to HT */
2194 if (found == 1) { 2322 if (found == 1) {
2195 /* it is not enabled, try to enable it */ 2323 /* it is not enabled, try to enable it */
2196 nv_ht_enable_msi_mapping(dev); 2324 if (all)
2325 ht_enable_msi_mapping(dev);
2326 else
2327 nv_ht_enable_msi_mapping(dev);
2197 } 2328 }
2198 return; 2329 return;
2199 } 2330 }
@@ -2205,8 +2336,20 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2205 /* Host bridge is not to HT, disable HT MSI mapping on this device */ 2336 /* Host bridge is not to HT, disable HT MSI mapping on this device */
2206 ht_disable_msi_mapping(dev); 2337 ht_disable_msi_mapping(dev);
2207} 2338}
2208DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2339
2209DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2340static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2341{
2342 return __nv_msi_ht_cap_quirk(dev, 1);
2343}
2344
2345static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2346{
2347 return __nv_msi_ht_cap_quirk(dev, 0);
2348}
2349
2350DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2351
2352DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2210 2353
2211static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 2354static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
2212{ 2355{
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 042e08924421..86503c14ce7e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -71,6 +71,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
71 down_write(&pci_bus_sem); 71 down_write(&pci_bus_sem);
72 list_del(&pci_bus->node); 72 list_del(&pci_bus->node);
73 up_write(&pci_bus_sem); 73 up_write(&pci_bus_sem);
74 if (!pci_bus->is_added)
75 return;
76
74 pci_remove_legacy_files(pci_bus); 77 pci_remove_legacy_files(pci_bus);
75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); 78 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
76 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); 79 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
@@ -92,6 +95,7 @@ EXPORT_SYMBOL(pci_remove_bus);
92 */ 95 */
93void pci_remove_bus_device(struct pci_dev *dev) 96void pci_remove_bus_device(struct pci_dev *dev)
94{ 97{
98 pci_stop_bus_device(dev);
95 if (dev->subordinate) { 99 if (dev->subordinate) {
96 struct pci_bus *b = dev->subordinate; 100 struct pci_bus *b = dev->subordinate;
97 101
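With pci_stop_bus_device() now called at the top of pci_remove_bus_device(), and pci_remove_bus() skipping sysfs cleanup for buses that were never added, a hotplug-style caller only needs the single remove call. A hedged sketch of such a caller follows; sketch_unplug_function() is an illustrative name, not code from the patch:

#include <linux/pci.h>

/* Illustrative only: tear down one slot function. pci_remove_bus_device()
 * now stops the device (and any subordinate buses) before removing it, so
 * drivers are detached before their resources disappear. */
static void sketch_unplug_function(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);

	if (!dev)
		return;

	pci_remove_bus_device(dev);	/* stop, then remove */
	pci_dev_put(dev);
}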
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 5af8bd538149..710d4ea69568 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
29 if (pdev->is_pcie) 29 if (pdev->is_pcie)
30 return NULL; 30 return NULL;
31 while (1) { 31 while (1) {
32 if (!pdev->bus->self) 32 if (!pdev->bus->parent)
33 break; 33 break;
34 pdev = pdev->bus->self; 34 pdev = pdev->bus->self;
35 /* a p2p bridge */ 35 /* a p2p bridge */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 704608945780..334285a8e237 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,7 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29 29
30static void pbus_assign_resources_sorted(struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
32 struct pci_dev *dev; 32 struct pci_dev *dev;
33 struct resource *res; 33 struct resource *res;
@@ -144,6 +144,9 @@ static void pci_setup_bridge(struct pci_bus *bus)
144 struct pci_bus_region region; 144 struct pci_bus_region region;
145 u32 l, bu, lu, io_upper16; 145 u32 l, bu, lu, io_upper16;
146 146
147 if (!pci_is_root_bus(bus) && bus->is_added)
148 return;
149
147 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", 150 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
148 pci_domain_nr(bus), bus->number); 151 pci_domain_nr(bus), bus->number);
149 152
@@ -495,7 +498,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
495} 498}
496EXPORT_SYMBOL(pci_bus_size_bridges); 499EXPORT_SYMBOL(pci_bus_size_bridges);
497 500
498void __ref pci_bus_assign_resources(struct pci_bus *bus) 501void __ref pci_bus_assign_resources(const struct pci_bus *bus)
499{ 502{
500 struct pci_bus *b; 503 struct pci_bus *b;
501 struct pci_dev *dev; 504 struct pci_dev *dev;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 32e8d88a4619..3039fcb86afc 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -120,6 +120,21 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
120 return err; 120 return err;
121} 121}
122 122
123#ifdef CONFIG_PCI_QUIRKS
124void pci_disable_bridge_window(struct pci_dev *dev)
125{
126 dev_dbg(&dev->dev, "Disabling bridge window.\n");
127
128 /* MMIO Base/Limit */
129 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
130
131 /* Prefetchable MMIO Base/Limit */
132 pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
133 pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
134 pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
135}
136#endif /* CONFIG_PCI_QUIRKS */
137
123int pci_assign_resource(struct pci_dev *dev, int resno) 138int pci_assign_resource(struct pci_dev *dev, int resno)
124{ 139{
125 struct pci_bus *bus = dev->bus; 140 struct pci_bus *bus = dev->bus;
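pci_disable_bridge_window() relies on the base-greater-than-limit convention: the dword store of 0x0000fff0 at PCI_MEMORY_BASE writes base = 0xfff0 and limit = 0x0000 in one access, so the decoded base (0xfff00000) exceeds the limit (0x000fffff) and the bridge forwards no memory cycles; the prefetchable window gets the same treatment, with the 64-bit upper halves set to 0xffffffff and 0. A sketch that spells those writes out as individual word accesses; sketch_disable_bridge_window() is an illustrative name, not the patch's code:

#include <linux/pci.h>

/* Sketch only: word-sized equivalents of the dword stores in the patch,
 * to make the base > limit trick explicit. */
static void sketch_disable_bridge_window(struct pci_dev *bridge)
{
	/* Non-prefetchable window: base 0xfff00000, limit 0x000fffff
	 * (base > limit => window disabled). */
	pci_write_config_word(bridge, PCI_MEMORY_BASE,  0xfff0);
	pci_write_config_word(bridge, PCI_MEMORY_LIMIT, 0x0000);

	/* Prefetchable window: same trick, including the 64-bit upper halves. */
	pci_write_config_word(bridge, PCI_PREF_MEMORY_BASE,   0xfff0);
	pci_write_config_word(bridge, PCI_PREF_MEMORY_LIMIT,  0x0000);
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,  0xffffffff);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0x00000000);
}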
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 5a8ccb4f604d..21189447e545 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * drivers/pci/slot.c 2 * drivers/pci/slot.c
3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
5 * Alex Chiang <achiang@hp.com> 5 * Alex Chiang <achiang@hp.com>
6 */ 6 */
7 7
8#include <linux/kobject.h> 8#include <linux/kobject.h>
@@ -52,8 +52,8 @@ static void pci_slot_release(struct kobject *kobj)
52 struct pci_dev *dev; 52 struct pci_dev *dev;
53 struct pci_slot *slot = to_pci_slot(kobj); 53 struct pci_slot *slot = to_pci_slot(kobj);
54 54
55 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, 55 dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
56 slot->bus->number, slot->number); 56 slot->number, pci_slot_name(slot));
57 57
58 list_for_each_entry(dev, &slot->bus->devices, bus_list) 58 list_for_each_entry(dev, &slot->bus->devices, bus_list)
59 if (PCI_SLOT(dev->devfn) == slot->number) 59 if (PCI_SLOT(dev->devfn) == slot->number)
@@ -248,9 +248,8 @@ placeholder:
248 if (PCI_SLOT(dev->devfn) == slot_nr) 248 if (PCI_SLOT(dev->devfn) == slot_nr)
249 dev->slot = slot; 249 dev->slot = slot;
250 250
251 /* Don't care if debug printk has a -1 for slot_nr */ 251 dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 slot_nr, pci_slot_name(slot));
253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
254 253
255out: 254out:
256 kfree(slot_name); 255 kfree(slot_name);
@@ -299,9 +298,8 @@ EXPORT_SYMBOL_GPL(pci_renumber_slot);
299 */ 298 */
300void pci_destroy_slot(struct pci_slot *slot) 299void pci_destroy_slot(struct pci_slot *slot)
301{ 300{
302 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 301 dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
303 atomic_read(&slot->kobj.kref.refcount) - 1, 302 slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
304 pci_domain_nr(slot->bus), slot->bus->number, slot->number);
305 303
306 down_write(&pci_bus_sem); 304 down_write(&pci_bus_sem);
307 kobject_put(&slot->kobj); 305 kobject_put(&slot->kobj);