aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci70
-rw-r--r--Documentation/DocBook/kernel-api.tmpl1
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt814
-rw-r--r--Documentation/PCI/pci-iov-howto.txt99
-rw-r--r--Documentation/feature-removal-schedule.txt32
-rw-r--r--Documentation/filesystems/sysfs-pci.txt10
-rw-r--r--Documentation/kernel-parameters.txt11
-rw-r--r--arch/alpha/include/asm/pci.h14
-rw-r--r--arch/alpha/kernel/Makefile2
-rw-r--r--arch/alpha/kernel/pci-sysfs.c366
-rw-r--r--arch/powerpc/include/asm/pci.h4
-rw-r--r--arch/powerpc/kernel/msi.c5
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/pci-dma.c3
-rw-r--r--arch/x86/pci/early.c19
-rw-r--r--arch/x86/pci/fixup.c20
-rw-r--r--arch/x86/pci/legacy.c3
-rw-r--r--arch/x86/pci/mmconfig-shared.c227
-rw-r--r--arch/x86/pci/mmconfig_64.c17
-rw-r--r--drivers/acpi/pci_root.c180
-rw-r--r--drivers/pci/Kconfig10
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/bus.c8
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c58
-rw-r--r--drivers/pci/hotplug/fakephp.c444
-rw-r--r--drivers/pci/hotplug/pciehp.h13
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c21
-rw-r--r--drivers/pci/hotplug/pciehp_core.c18
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c34
-rw-r--r--drivers/pci/hotplug/shpchp.h10
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c2
-rw-r--r--drivers/pci/intel-iommu.c2
-rw-r--r--drivers/pci/iov.c680
-rw-r--r--drivers/pci/msi.c426
-rw-r--r--drivers/pci/msi.h6
-rw-r--r--drivers/pci/pci-acpi.c215
-rw-r--r--drivers/pci/pci-driver.c81
-rw-r--r--drivers/pci/pci-sysfs.c124
-rw-r--r--drivers/pci/pci.c193
-rw-r--r--drivers/pci/pci.h65
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c28
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c10
-rw-r--r--drivers/pci/pcie/portdrv.h14
-rw-r--r--drivers/pci/pcie/portdrv_bus.c18
-rw-r--r--drivers/pci/pcie/portdrv_core.c379
-rw-r--r--drivers/pci/pcie/portdrv_pci.c50
-rw-r--r--drivers/pci/probe.c210
-rw-r--r--drivers/pci/quirks.c221
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/pci/search.c2
-rw-r--r--drivers/pci/setup-bus.c7
-rw-r--r--drivers/pci/setup-res.c15
-rw-r--r--drivers/pci/slot.c18
-rw-r--r--include/linux/acpi.h34
-rw-r--r--include/linux/msi.h13
-rw-r--r--include/linux/pci-acpi.h67
-rw-r--r--include/linux/pci.h61
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pci_regs.h37
-rw-r--r--include/linux/pcieport_if.h36
62 files changed, 3641 insertions, 1902 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index e638e15a8895..97ad190e13af 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -41,6 +41,49 @@ Description:
41 for the device and attempt to bind to it. For example: 41 for the device and attempt to bind to it. For example:
42 # echo "8086 10f5" > /sys/bus/pci/drivers/foo/new_id 42 # echo "8086 10f5" > /sys/bus/pci/drivers/foo/new_id
43 43
44What: /sys/bus/pci/drivers/.../remove_id
45Date: February 2009
46Contact: Chris Wright <chrisw@sous-sol.org>
47Description:
48 Writing a device ID to this file will remove an ID
49 that was dynamically added via the new_id sysfs entry.
50 The format for the device ID is:
51 VVVV DDDD SVVV SDDD CCCC MMMM. That is Vendor ID, Device
52 ID, Subsystem Vendor ID, Subsystem Device ID, Class,
53 and Class Mask. The Vendor ID and Device ID fields are
54 required, the rest are optional. After successfully
55 removing an ID, the driver will no longer support the
56 device. This is useful to ensure auto probing won't
57 match the driver to the device. For example:
58 # echo "8086 10f5" > /sys/bus/pci/drivers/foo/remove_id
59
60What: /sys/bus/pci/rescan
61Date: January 2009
62Contact: Linux PCI developers <linux-pci@vger.kernel.org>
63Description:
64 Writing a non-zero value to this attribute will
65 force a rescan of all PCI buses in the system, and
66 re-discover previously removed devices.
67 Depends on CONFIG_HOTPLUG.
68
69What: /sys/bus/pci/devices/.../remove
70Date: January 2009
71Contact: Linux PCI developers <linux-pci@vger.kernel.org>
72Description:
73 Writing a non-zero value to this attribute will
74 hot-remove the PCI device and any of its children.
75 Depends on CONFIG_HOTPLUG.
76
77What: /sys/bus/pci/devices/.../rescan
78Date: January 2009
79Contact: Linux PCI developers <linux-pci@vger.kernel.org>
80Description:
81 Writing a non-zero value to this attribute will
82 force a rescan of the device's parent bus and all
83 child buses, and re-discover devices removed earlier
84 from this part of the device tree.
85 Depends on CONFIG_HOTPLUG.
86
44What: /sys/bus/pci/devices/.../vpd 87What: /sys/bus/pci/devices/.../vpd
45Date: February 2008 88Date: February 2008
46Contact: Ben Hutchings <bhutchings@solarflare.com> 89Contact: Ben Hutchings <bhutchings@solarflare.com>
@@ -52,3 +95,30 @@ Description:
52 that some devices may have malformatted data. If the 95 that some devices may have malformatted data. If the
53 underlying VPD has a writable section then the 96 underlying VPD has a writable section then the
54 corresponding section of this file will be writable. 97 corresponding section of this file will be writable.
98
99What: /sys/bus/pci/devices/.../virtfnN
100Date: March 2009
101Contact: Yu Zhao <yu.zhao@intel.com>
102Description:
103 This symbolic link appears when hardware supports the SR-IOV
104 capability and the Physical Function driver has enabled it.
105 The symbolic link points to the PCI device sysfs entry of the
106 Virtual Function whose index is N (0...MaxVFs-1).
107
108What: /sys/bus/pci/devices/.../dep_link
109Date: March 2009
110Contact: Yu Zhao <yu.zhao@intel.com>
111Description:
112 This symbolic link appears when hardware supports the SR-IOV
113 capability and the Physical Function driver has enabled it,
114 and this device has vendor specific dependencies with others.
115 The symbolic link points to the PCI device sysfs entry of
116 Physical Function this device depends on.
117
118What: /sys/bus/pci/devices/.../physfn
119Date: March 2009
120Contact: Yu Zhao <yu.zhao@intel.com>
121Description:
122 This symbolic link appears when a device is a Virtual Function.
123 The symbolic link points to the PCI device sysfs entry of the
124 Physical Function this device associates with.
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index bc962cda6504..58c194572c76 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -199,6 +199,7 @@ X!Edrivers/pci/hotplug.c
199--> 199-->
200!Edrivers/pci/probe.c 200!Edrivers/pci/probe.c
201!Edrivers/pci/rom.c 201!Edrivers/pci/rom.c
202!Edrivers/pci/iov.c
202 </sect1> 203 </sect1>
203 <sect1><title>PCI Hotplug Support Library</title> 204 <sect1><title>PCI Hotplug Support Library</title>
204!Edrivers/pci/hotplug/pci_hotplug_core.c 205!Edrivers/pci/hotplug/pci_hotplug_core.c
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 256defd7e174..dcf7acc720e1 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -4,506 +4,356 @@
4 Revised Feb 12, 2004 by Martine Silbermann 4 Revised Feb 12, 2004 by Martine Silbermann
5 email: Martine.Silbermann@hp.com 5 email: Martine.Silbermann@hp.com
6 Revised Jun 25, 2004 by Tom L Nguyen 6 Revised Jun 25, 2004 by Tom L Nguyen
7 Revised Jul 9, 2008 by Matthew Wilcox <willy@linux.intel.com>
8 Copyright 2003, 2008 Intel Corporation
7 9
81. About this guide 101. About this guide
9 11
10This guide describes the basics of Message Signaled Interrupts (MSI), 12This guide describes the basics of Message Signaled Interrupts (MSIs),
11the advantages of using MSI over traditional interrupt mechanisms, 13the advantages of using MSI over traditional interrupt mechanisms, how
12and how to enable your driver to use MSI or MSI-X. Also included is 14to change your driver to use MSI or MSI-X and some basic diagnostics to
13a Frequently Asked Questions (FAQ) section. 15try if a device doesn't support MSIs.
14
151.1 Terminology
16
17PCI devices can be single-function or multi-function. In either case,
18when this text talks about enabling or disabling MSI on a "device
19function," it is referring to one specific PCI device and function and
20not to all functions on a PCI device (unless the PCI device has only
21one function).
22
232. Copyright 2003 Intel Corporation
24
253. What is MSI/MSI-X?
26
27Message Signaled Interrupt (MSI), as described in the PCI Local Bus
28Specification Revision 2.3 or later, is an optional feature, and a
29required feature for PCI Express devices. MSI enables a device function
30to request service by sending an Inbound Memory Write on its PCI bus to
31the FSB as a Message Signal Interrupt transaction. Because MSI is
32generated in the form of a Memory Write, all transaction conditions,
33such as a Retry, Master-Abort, Target-Abort or normal completion, are
34supported.
35
36A PCI device that supports MSI must also support pin IRQ assertion
37interrupt mechanism to provide backward compatibility for systems that
38do not support MSI. In systems which support MSI, the bus driver is
39responsible for initializing the message address and message data of
40the device function's MSI/MSI-X capability structure during device
41initial configuration.
42
43An MSI capable device function indicates MSI support by implementing
44the MSI/MSI-X capability structure in its PCI capability list. The
45device function may implement both the MSI capability structure and
46the MSI-X capability structure; however, the bus driver should not
47enable both.
48
49The MSI capability structure contains Message Control register,
50Message Address register and Message Data register. These registers
51provide the bus driver control over MSI. The Message Control register
52indicates the MSI capability supported by the device. The Message
53Address register specifies the target address and the Message Data
54register specifies the characteristics of the message. To request
55service, the device function writes the content of the Message Data
56register to the target address. The device and its software driver
57are prohibited from writing to these registers.
58
59The MSI-X capability structure is an optional extension to MSI. It
60uses an independent and separate capability structure. There are
61some key advantages to implementing the MSI-X capability structure
62over the MSI capability structure as described below.
63
64 - Support a larger maximum number of vectors per function.
65
66 - Provide the ability for system software to configure
67 each vector with an independent message address and message
68 data, specified by a table that resides in Memory Space.
69
70 - MSI and MSI-X both support per-vector masking. Per-vector
71 masking is an optional extension of MSI but a required
72 feature for MSI-X. Per-vector masking provides the kernel the
73 ability to mask/unmask a single MSI while running its
74 interrupt service routine. If per-vector masking is
75 not supported, then the device driver should provide the
76 hardware/software synchronization to ensure that the device
77 generates MSI when the driver wants it to do so.
78
794. Why use MSI?
80
81As a benefit to the simplification of board design, MSI allows board
82designers to remove out-of-band interrupt routing. MSI is another
83step towards a legacy-free environment.
84
85Due to increasing pressure on chipset and processor packages to
86reduce pin count, the need for interrupt pins is expected to
87diminish over time. Devices, due to pin constraints, may implement
88messages to increase performance.
89
90PCI Express endpoints uses INTx emulation (in-band messages) instead
91of IRQ pin assertion. Using INTx emulation requires interrupt
92sharing among devices connected to the same node (PCI bridge) while
93MSI is unique (non-shared) and does not require BIOS configuration
94support. As a result, the PCI Express technology requires MSI
95support for better interrupt performance.
96
97Using MSI enables the device functions to support two or more
98vectors, which can be configured to target different CPUs to
99increase scalability.
100
1015. Configuring a driver to use MSI/MSI-X
102
103By default, the kernel will not enable MSI/MSI-X on all devices that
104support this capability. The CONFIG_PCI_MSI kernel option
105must be selected to enable MSI/MSI-X support.
106
1075.1 Including MSI/MSI-X support into the kernel
108
109To allow MSI/MSI-X capable device drivers to selectively enable
110MSI/MSI-X (using pci_enable_msi()/pci_enable_msix() as described
111below), the VECTOR based scheme needs to be enabled by setting
112CONFIG_PCI_MSI during kernel config.
113
114Since the target of the inbound message is the local APIC, providing
115CONFIG_X86_LOCAL_APIC must be enabled as well as CONFIG_PCI_MSI.
116
1175.2 Configuring for MSI support
118
119Due to the non-contiguous fashion in vector assignment of the
120existing Linux kernel, this version does not support multiple
121messages regardless of a device function is capable of supporting
122more than one vector. To enable MSI on a device function's MSI
123capability structure requires a device driver to call the function
124pci_enable_msi() explicitly.
125
1265.2.1 API pci_enable_msi
127 16
128int pci_enable_msi(struct pci_dev *dev)
129 17
130With this new API, a device driver that wants to have MSI 182. What are MSIs?
131enabled on its device function must call this API to enable MSI.
132A successful call will initialize the MSI capability structure
133with ONE vector, regardless of whether a device function is
134capable of supporting multiple messages. This vector replaces the
135pre-assigned dev->irq with a new MSI vector. To avoid a conflict
136of the new assigned vector with existing pre-assigned vector requires
137a device driver to call this API before calling request_irq().
138 19
1395.2.2 API pci_disable_msi 20A Message Signaled Interrupt is a write from the device to a special
21address which causes an interrupt to be received by the CPU.
140 22
141void pci_disable_msi(struct pci_dev *dev) 23The MSI capability was first specified in PCI 2.2 and was later enhanced
24in PCI 3.0 to allow each interrupt to be masked individually. The MSI-X
25capability was also introduced with PCI 3.0. It supports more interrupts
26per device than MSI and allows interrupts to be independently configured.
142 27
143This API should always be used to undo the effect of pci_enable_msi() 28Devices may support both MSI and MSI-X, but only one can be enabled at
144when a device driver is unloading. This API restores dev->irq with 29a time.
145the pre-assigned IOAPIC vector and switches a device's interrupt
146mode to PCI pin-irq assertion/INTx emulation mode.
147
148Note that a device driver should always call free_irq() on the MSI vector
149that it has done request_irq() on before calling this API. Failure to do
150so results in a BUG_ON() and a device will be left with MSI enabled and
151leaks its vector.
152
1535.2.3 MSI mode vs. legacy mode diagram
154
155The below diagram shows the events which switch the interrupt
156mode on the MSI-capable device function between MSI mode and
157PIN-IRQ assertion mode.
158
159 ------------ pci_enable_msi ------------------------
160 | | <=============== | |
161 | MSI MODE | | PIN-IRQ ASSERTION MODE |
162 | | ===============> | |
163 ------------ pci_disable_msi ------------------------
164
165
166Figure 1. MSI Mode vs. Legacy Mode
167
168In Figure 1, a device operates by default in legacy mode. Legacy
169in this context means PCI pin-irq assertion or PCI-Express INTx
170emulation. A successful MSI request (using pci_enable_msi()) switches
171a device's interrupt mode to MSI mode. A pre-assigned IOAPIC vector
172stored in dev->irq will be saved by the PCI subsystem and a new
173assigned MSI vector will replace dev->irq.
174
175To return back to its default mode, a device driver should always call
176pci_disable_msi() to undo the effect of pci_enable_msi(). Note that a
177device driver should always call free_irq() on the MSI vector it has
178done request_irq() on before calling pci_disable_msi(). Failure to do
179so results in a BUG_ON() and a device will be left with MSI enabled and
180leaks its vector. Otherwise, the PCI subsystem restores a device's
181dev->irq with a pre-assigned IOAPIC vector and marks the released
182MSI vector as unused.
183
184Once being marked as unused, there is no guarantee that the PCI
185subsystem will reserve this MSI vector for a device. Depending on
186the availability of current PCI vector resources and the number of
187MSI/MSI-X requests from other drivers, this MSI may be re-assigned.
188
189For the case where the PCI subsystem re-assigns this MSI vector to
190another driver, a request to switch back to MSI mode may result
191in being assigned a different MSI vector or a failure if no more
192vectors are available.
193
1945.3 Configuring for MSI-X support
195
196Due to the ability of the system software to configure each vector of
197the MSI-X capability structure with an independent message address
198and message data, the non-contiguous fashion in vector assignment of
199the existing Linux kernel has no impact on supporting multiple
200messages on an MSI-X capable device functions. To enable MSI-X on
201a device function's MSI-X capability structure requires its device
202driver to call the function pci_enable_msix() explicitly.
203
204The function pci_enable_msix(), once invoked, enables either
205all or nothing, depending on the current availability of PCI vector
206resources. If the PCI vector resources are available for the number
207of vectors requested by a device driver, this function will configure
208the MSI-X table of the MSI-X capability structure of a device with
209requested messages. To emphasize this reason, for example, a device
210may be capable for supporting the maximum of 32 vectors while its
211software driver usually may request 4 vectors. It is recommended
212that the device driver should call this function once during the
213initialization phase of the device driver.
214
215Unlike the function pci_enable_msi(), the function pci_enable_msix()
216does not replace the pre-assigned IOAPIC dev->irq with a new MSI
217vector because the PCI subsystem writes the 1:1 vector-to-entry mapping
218into the field vector of each element contained in a second argument.
219Note that the pre-assigned IOAPIC dev->irq is valid only if the device
220operates in PIN-IRQ assertion mode. In MSI-X mode, any attempt at
221using dev->irq by the device driver to request for interrupt service
222may result in unpredictable behavior.
223
224For each MSI-X vector granted, a device driver is responsible for calling
225other functions like request_irq(), enable_irq(), etc. to enable
226this vector with its corresponding interrupt service handler. It is
227a device driver's choice to assign all vectors with the same
228interrupt service handler or each vector with a unique interrupt
229service handler.
230
2315.3.1 Handling MMIO address space of MSI-X Table
232
233The PCI 3.0 specification has implementation notes that MMIO address
234space for a device's MSI-X structure should be isolated so that the
235software system can set different pages for controlling accesses to the
236MSI-X structure. The implementation of MSI support requires the PCI
237subsystem, not a device driver, to maintain full control of the MSI-X
238table/MSI-X PBA (Pending Bit Array) and MMIO address space of the MSI-X
239table/MSI-X PBA. A device driver should not access the MMIO address
240space of the MSI-X table/MSI-X PBA.
241
2425.3.2 API pci_enable_msix
243 30
244int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
245 31
246This API enables a device driver to request the PCI subsystem 323. Why use MSIs?
247to enable MSI-X messages on its hardware device. Depending on 33
248the availability of PCI vectors resources, the PCI subsystem enables 34There are three reasons why using MSIs can give an advantage over
249either all or none of the requested vectors. 35traditional pin-based interrupts.
36
37Pin-based PCI interrupts are often shared amongst several devices.
38To support this, the kernel must call each interrupt handler associated
39with an interrupt, which leads to reduced performance for the system as
40a whole. MSIs are never shared, so this problem cannot arise.
41
42When a device writes data to memory, then raises a pin-based interrupt,
43it is possible that the interrupt may arrive before all the data has
44arrived in memory (this becomes more likely with devices behind PCI-PCI
45bridges). In order to ensure that all the data has arrived in memory,
46the interrupt handler must read a register on the device which raised
47the interrupt. PCI transaction ordering rules require that all the data
48arrives in memory before the value can be returned from the register.
49Using MSIs avoids this problem as the interrupt-generating write cannot
50pass the data writes, so by the time the interrupt is raised, the driver
51knows that all the data has arrived in memory.
52
53PCI devices can only support a single pin-based interrupt per function.
54Often drivers have to query the device to find out what event has
55occurred, slowing down interrupt handling for the common case. With
56MSIs, a device can support more interrupts, allowing each interrupt
57to be specialised to a different purpose. One possible design gives
58infrequent conditions (such as errors) their own interrupt which allows
59the driver to handle the normal interrupt handling path more efficiently.
60Other possible designs include giving one interrupt to each packet queue
61in a network card or each port in a storage controller.
62
63
644. How to use MSIs
65
66PCI devices are initialised to use pin-based interrupts. The device
67driver has to set up the device to use MSI or MSI-X. Not all machines
68support MSIs correctly, and for those machines, the APIs described below
69will simply fail and the device will continue to use pin-based interrupts.
70
714.1 Include kernel support for MSIs
72
73To support MSI or MSI-X, the kernel must be built with the CONFIG_PCI_MSI
74option enabled. This option is only available on some architectures,
75and it may depend on some other options also being set. For example,
76on x86, you must also enable X86_UP_APIC or SMP in order to see the
77CONFIG_PCI_MSI option.
78
794.2 Using MSI
80
81Most of the hard work is done for the driver in the PCI layer. It simply
82has to request that the PCI layer set up the MSI capability for this
83device.
84
854.2.1 pci_enable_msi
86
87int pci_enable_msi(struct pci_dev *dev)
88
89A successful call will allocate ONE interrupt to the device, regardless
90of how many MSIs the device supports. The device will be switched from
91pin-based interrupt mode to MSI mode. The dev->irq number is changed
92to a new number which represents the message signaled interrupt.
93This function should be called before the driver calls request_irq()
94since enabling MSIs disables the pin-based IRQ and the driver will not
95receive interrupts on the old interrupt.
96
974.2.2 pci_enable_msi_block
98
99int pci_enable_msi_block(struct pci_dev *dev, int count)
100
101This variation on the above call allows a device driver to request multiple
102MSIs. The MSI specification only allows interrupts to be allocated in
103powers of two, up to a maximum of 2^5 (32).
104
105If this function returns 0, it has succeeded in allocating at least as many
106interrupts as the driver requested (it may have allocated more in order
107to satisfy the power-of-two requirement). In this case, the function
108enables MSI on this device and updates dev->irq to be the lowest of
109the new interrupts assigned to it. The other interrupts assigned to
110the device are in the range dev->irq to dev->irq + count - 1.
111
112If this function returns a negative number, it indicates an error and
113the driver should not attempt to request any more MSI interrupts for
114this device. If this function returns a positive number, it will be
115less than 'count' and indicate the number of interrupts that could have
116been allocated. In neither case will the irq value have been
117updated, nor will the device have been switched into MSI mode.
118
119The device driver must decide what action to take if
120pci_enable_msi_block() returns a value less than the number asked for.
121Some devices can make use of fewer interrupts than the maximum they
122request; in this case the driver should call pci_enable_msi_block()
123again. Note that it is not guaranteed to succeed, even when the
124'count' has been reduced to the value returned from a previous call to
125pci_enable_msi_block(). This is because there are multiple constraints
126on the number of vectors that can be allocated; pci_enable_msi_block()
127will return as soon as it finds any constraint that doesn't allow the
128call to succeed.
129
1304.2.3 pci_disable_msi
131
132void pci_disable_msi(struct pci_dev *dev)
250 133
251Argument 'dev' points to the device (pci_dev) structure. 134This function should be used to undo the effect of pci_enable_msi() or
135pci_enable_msi_block(). Calling it restores dev->irq to the pin-based
136interrupt number and frees the previously allocated message signaled
137interrupt(s). The interrupt may subsequently be assigned to another
138device, so drivers should not cache the value of dev->irq.
252 139
253Argument 'entries' is a pointer to an array of msix_entry structs. 140A device driver must always call free_irq() on the interrupt(s)
254The number of entries is indicated in argument 'nvec'. 141for which it has called request_irq() before calling this function.
255struct msix_entry is defined in /driver/pci/msi.h: 142Failure to do so will result in a BUG_ON(), the device will be left with
143MSI enabled and will leak its vector.
144
1454.3 Using MSI-X
146
147The MSI-X capability is much more flexible than the MSI capability.
148It supports up to 2048 interrupts, each of which can be controlled
149independently. To support this flexibility, drivers must use an array of
150`struct msix_entry':
256 151
257struct msix_entry { 152struct msix_entry {
258 u16 vector; /* kernel uses to write alloc vector */ 153 u16 vector; /* kernel uses to write alloc vector */
259 u16 entry; /* driver uses to specify entry */ 154 u16 entry; /* driver uses to specify entry */
260}; 155};
261 156
262A device driver is responsible for initializing the field 'entry' of 157This allows for the device to use these interrupts in a sparse fashion;
263each element with a unique entry supported by MSI-X table. Otherwise, 158for example it could use interrupts 3 and 1027 and allocate only a
264-EINVAL will be returned as a result. A successful return of zero 159two-element array. The driver is expected to fill in the 'entry' value
265indicates the PCI subsystem completed initializing each of the requested 160in each element of the array to indicate which entries it wants the kernel
266entries of the MSI-X table with message address and message data. 161to assign interrupts for. It is invalid to fill in two entries with the
267Last but not least, the PCI subsystem will write the 1:1 162same number.
268vector-to-entry mapping into the field 'vector' of each element. A 163
269device driver is responsible for keeping track of allocated MSI-X 1644.3.1 pci_enable_msix
270vectors in its internal data structure. 165
271 166int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
272A return of zero indicates that the number of MSI-X vectors was 167
273successfully allocated. A return of greater than zero indicates 168Calling this function asks the PCI subsystem to allocate 'nvec' MSIs.
274MSI-X vector shortage. Or a return of less than zero indicates 169The 'entries' argument is a pointer to an array of msix_entry structs
275a failure. This failure may be a result of duplicate entries 170which should be at least 'nvec' entries in size. On success, the
276specified in second argument, or a result of no available vector, 171function will return 0 and the device will have been switched into
277or a result of failing to initialize MSI-X table entries. 172MSI-X interrupt mode. The 'vector' elements in each entry will have
278 173been filled in with the interrupt number. The driver should then call
2795.3.3 API pci_disable_msix 174request_irq() for each 'vector' that it decides to use.
175
176If this function returns a negative number, it indicates an error and
177the driver should not attempt to allocate any more MSI-X interrupts for
178this device. If it returns a positive number, it indicates the maximum
179number of interrupt vectors that could have been allocated. See example
180below.
181
182This function, in contrast with pci_enable_msi(), does not adjust
183dev->irq. The device will not generate interrupts for this interrupt
184number once MSI-X is enabled. The device driver is responsible for
185keeping track of the interrupts assigned to the MSI-X vectors so it can
186free them again later.
187
188Device drivers should normally call this function once per device
189during the initialization phase.
190
191It is ideal if drivers can cope with a variable number of MSI-X interrupts,
192there are many reasons why the platform may not be able to provide the
193exact number a driver asks for.
194
195A request loop to achieve that might look like:
196
197static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
198{
199 while (nvec >= FOO_DRIVER_MINIMUM_NVEC) {
200 rc = pci_enable_msix(adapter->pdev,
201 adapter->msix_entries, nvec);
202 if (rc > 0)
203 nvec = rc;
204 else
205 return rc;
206 }
207
208 return -ENOSPC;
209}
210
2114.3.2 pci_disable_msix
280 212
281void pci_disable_msix(struct pci_dev *dev) 213void pci_disable_msix(struct pci_dev *dev)
282 214
283This API should always be used to undo the effect of pci_enable_msix() 215This API should be used to undo the effect of pci_enable_msix(). It frees
284when a device driver is unloading. Note that a device driver should 216the previously allocated message signaled interrupts. The interrupts may
285always call free_irq() on all MSI-X vectors it has done request_irq() 217subsequently be assigned to another device, so drivers should not cache
286on before calling this API. Failure to do so results in a BUG_ON() and 218the value of the 'vector' elements over a call to pci_disable_msix().
287a device will be left with MSI-X enabled and leaks its vectors. 219
288 220A device driver must always call free_irq() on the interrupt(s)
2895.3.4 MSI-X mode vs. legacy mode diagram 221for which it has called request_irq() before calling this function.
290 222Failure to do so will result in a BUG_ON(), the device will be left with
291The below diagram shows the events which switch the interrupt 223MSI enabled and will leak its vector.
292mode on the MSI-X capable device function between MSI-X mode and 224
293PIN-IRQ assertion mode (legacy). 2254.3.3 The MSI-X Table
294 226
295 ------------ pci_enable_msix(,,n) ------------------------ 227The MSI-X capability specifies a BAR and offset within that BAR for the
296 | | <=============== | | 228MSI-X Table. This address is mapped by the PCI subsystem, and should not
297 | MSI-X MODE | | PIN-IRQ ASSERTION MODE | 229be accessed directly by the device driver. If the driver wishes to
298 | | ===============> | | 230mask or unmask an interrupt, it should call disable_irq() / enable_irq().
299 ------------ pci_disable_msix ------------------------ 231
300 2324.4 Handling devices implementing both MSI and MSI-X capabilities
301Figure 2. MSI-X Mode vs. Legacy Mode 233
302 234If a device implements both MSI and MSI-X capabilities, it can
303In Figure 2, a device operates by default in legacy mode. A 235run in either MSI mode or MSI-X mode but not both simultaneously.
304successful MSI-X request (using pci_enable_msix()) switches a 236This is a requirement of the PCI spec, and it is enforced by the
305device's interrupt mode to MSI-X mode. A pre-assigned IOAPIC vector 237PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or
306stored in dev->irq will be saved by the PCI subsystem; however, 238pci_enable_msix() when MSI is already enabled will result in an error.
307unlike MSI mode, the PCI subsystem will not replace dev->irq with 239If a device driver wishes to switch between MSI and MSI-X at runtime,
308assigned MSI-X vector because the PCI subsystem already writes the 1:1 240it must first quiesce the device, then switch it back to pin-interrupt
309vector-to-entry mapping into the field 'vector' of each element 241mode, before calling pci_enable_msi() or pci_enable_msix() and resuming
310specified in second argument. 242operation. This is not expected to be a common operation but may be
311 243useful for debugging or testing during development.
312To return back to its default mode, a device driver should always call 244
313pci_disable_msix() to undo the effect of pci_enable_msix(). Note that 2454.5 Considerations when using MSIs
314a device driver should always call free_irq() on all MSI-X vectors it 246
315has done request_irq() on before calling pci_disable_msix(). Failure 2474.5.1 Choosing between MSI-X and MSI
316to do so results in a BUG_ON() and a device will be left with MSI-X 248
317enabled and leaks its vectors. Otherwise, the PCI subsystem switches a 249If your device supports both MSI-X and MSI capabilities, you should use
318device function's interrupt mode from MSI-X mode to legacy mode and 250the MSI-X facilities in preference to the MSI facilities. As mentioned
319marks all allocated MSI-X vectors as unused. 251above, MSI-X supports any number of interrupts between 1 and 2048.
320 252In constrast, MSI is restricted to a maximum of 32 interrupts (and
321Once being marked as unused, there is no guarantee that the PCI 253must be a power of two). In addition, the MSI interrupt vectors must
322subsystem will reserve these MSI-X vectors for a device. Depending on 254be allocated consecutively, so the system may not be able to allocate
323the availability of current PCI vector resources and the number of 255as many vectors for MSI as it could for MSI-X. On some platforms, MSI
324MSI/MSI-X requests from other drivers, these MSI-X vectors may be 256interrupts must all be targeted at the same set of CPUs whereas MSI-X
325re-assigned. 257interrupts can all be targeted at different CPUs.
326 258
327For the case where the PCI subsystem re-assigned these MSI-X vectors 2594.5.2 Spinlocks
328to other drivers, a request to switch back to MSI-X mode may result 260
329being assigned with another set of MSI-X vectors or a failure if no 261Most device drivers have a per-device spinlock which is taken in the
330more vectors are available. 262interrupt handler. With pin-based interrupts or a single MSI, it is not
331 263necessary to disable interrupts (Linux guarantees the same interrupt will
3325.4 Handling function implementing both MSI and MSI-X capabilities 264not be re-entered). If a device uses multiple interrupts, the driver
333 265must disable interrupts while the lock is held. If the device sends
334For the case where a function implements both MSI and MSI-X 266a different interrupt, the driver will deadlock trying to recursively
335capabilities, the PCI subsystem enables a device to run either in MSI 267acquire the spinlock.
336mode or MSI-X mode but not both. A device driver determines whether it 268
337wants MSI or MSI-X enabled on its hardware device. Once a device 269There are two solutions. The first is to take the lock with
338driver requests for MSI, for example, it is prohibited from requesting 270spin_lock_irqsave() or spin_lock_irq() (see
339MSI-X; in other words, a device driver is not permitted to ping-pong 271Documentation/DocBook/kernel-locking). The second is to specify
340between MSI and MSI-X mode during a run-time. 272IRQF_DISABLED to request_irq() so that the kernel runs the entire
341 273interrupt routine with interrupts disabled.
3425.5 Hardware requirements for MSI/MSI-X support 274
343 275If your MSI interrupt routine does not hold the lock for the whole time
344MSI/MSI-X support requires support from both system hardware and 276it is running, the first solution may be best. The second solution is
345individual hardware device functions. 277normally preferred as it avoids making two transitions from interrupt
346 278disabled to enabled and back again.
3475.5.1 Required x86 hardware support 279
348 2804.6 How to tell whether MSI/MSI-X is enabled on a device
349Since the target of MSI address is the local APIC CPU, enabling 281
350MSI/MSI-X support in the Linux kernel is dependent on whether existing 282Using 'lspci -v' (as root) may show some devices with "MSI", "Message
351system hardware supports local APIC. Users should verify that their 283Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities
352system supports local APIC operation by testing that it runs when 284has an 'Enable' flag which will be followed with either "+" (enabled)
353CONFIG_X86_LOCAL_APIC=y. 285or "-" (disabled).
354 286
355In SMP environment, CONFIG_X86_LOCAL_APIC is automatically set; 287
356however, in UP environment, users must manually set 2885. MSI quirks
357CONFIG_X86_LOCAL_APIC. Once CONFIG_X86_LOCAL_APIC=y, setting 289
358CONFIG_PCI_MSI enables the VECTOR based scheme and the option for 290Several PCI chipsets or devices are known not to support MSIs.
359MSI-capable device drivers to selectively enable MSI/MSI-X. 291The PCI stack provides three ways to disable MSIs:
360 292
361Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI/MSI-X 2931. globally
362vector is allocated new during runtime and MSI/MSI-X support does not 2942. on all devices behind a specific bridge
363depend on BIOS support. This key independency enables MSI/MSI-X 2953. on a single device
364support on future IOxAPIC free platforms. 296
365 2975.1. Disabling MSIs globally
3665.5.2 Device hardware support 298
367 299Some host chipsets simply don't support MSIs properly. If we're
368The hardware device function supports MSI by indicating the 300lucky, the manufacturer knows this and has indicated it in the ACPI
369MSI/MSI-X capability structure on its PCI capability list. By 301FADT table. In this case, Linux will automatically disable MSIs.
370default, this capability structure will not be initialized by 302Some boards don't include this information in the table and so we have
371the kernel to enable MSI during the system boot. In other words, 303to detect them ourselves. The complete list of these is found near the
372the device function is running on its default pin assertion mode. 304quirk_disable_all_msi() function in drivers/pci/quirks.c.
373Note that in many cases the hardware supporting MSI have bugs, 305
374which may result in system hangs. The software driver of specific 306If you have a board which has problems with MSIs, you can pass pci=nomsi
375MSI-capable hardware is responsible for deciding whether to call 307on the kernel command line to disable MSIs on all devices. It would be
376pci_enable_msi or not. A return of zero indicates the kernel 308in your best interests to report the problem to linux-pci@vger.kernel.org
377successfully initialized the MSI/MSI-X capability structure of the 309including a full 'lspci -v' so we can add the quirks to the kernel.
378device function. The device function is now running on MSI/MSI-X mode. 310
379 3115.2. Disabling MSIs below a bridge
3805.6 How to tell whether MSI/MSI-X is enabled on device function 312
381 313Some PCI bridges are not able to route MSIs between busses properly.
382At the driver level, a return of zero from the function call of 314In this case, MSIs must be disabled on all devices behind the bridge.
383pci_enable_msi()/pci_enable_msix() indicates to a device driver that 315
384its device function is initialized successfully and ready to run in 316Some bridges allow you to enable MSIs by changing some bits in their
385MSI/MSI-X mode. 317PCI configuration space (especially the Hypertransport chipsets such
386 318as the nVidia nForce and Serverworks HT2000). As with host chipsets,
387At the user level, users can use the command 'cat /proc/interrupts' 319Linux mostly knows about them and automatically enables MSIs if it can.
388to display the vectors allocated for devices and their interrupt 320If you have a bridge which Linux doesn't yet know about, you can enable
389MSI/MSI-X modes ("PCI-MSI"/"PCI-MSI-X"). Below shows MSI mode is 321MSIs in configuration space using whatever method you know works, then
390enabled on a SCSI Adaptec 39320D Ultra320 controller. 322enable MSIs on that bridge by doing:
391 323
392 CPU0 CPU1 324 echo 1 > /sys/bus/pci/devices/$bridge/msi_bus
393 0: 324639 0 IO-APIC-edge timer 325
394 1: 1186 0 IO-APIC-edge i8042 326where $bridge is the PCI address of the bridge you've enabled (eg
395 2: 0 0 XT-PIC cascade 3270000:00:0e.0).
396 12: 2797 0 IO-APIC-edge i8042 328
397 14: 6543 0 IO-APIC-edge ide0 329To disable MSIs, echo 0 instead of 1. Changing this value should be
398 15: 1 0 IO-APIC-edge ide1 330done with caution as it can break interrupt handling for all devices
399169: 0 0 IO-APIC-level uhci-hcd 331below this bridge.
400185: 0 0 IO-APIC-level uhci-hcd 332
401193: 138 10 PCI-MSI aic79xx 333Again, please notify linux-pci@vger.kernel.org of any bridges that need
402201: 30 0 PCI-MSI aic79xx 334special handling.
403225: 30 0 IO-APIC-level aic7xxx 335
404233: 30 0 IO-APIC-level aic7xxx 3365.3. Disabling MSIs on a single device
405NMI: 0 0 337
406LOC: 324553 325068 338Some devices are known to have faulty MSI implementations. Usually this
407ERR: 0 339is handled in the individual device driver but occasionally it's necessary
408MIS: 0 340to handle this with a quirk. Some drivers have an option to disable use
409 341of MSI. While this is a convenient workaround for the driver author,
4106. MSI quirks 342it is not good practice, and should not be emulated.
411 343
412Several PCI chipsets or devices are known to not support MSI. 3445.4. Finding why MSIs are disabled on a device
413The PCI stack provides 3 possible levels of MSI disabling: 345
414* on a single device 346From the above three sections, you can see that there are many reasons
415* on all devices behind a specific bridge 347why MSIs may not be enabled for a given device. Your first step should
416* globally 348be to examine your dmesg carefully to determine whether MSIs are enabled
417 349for your machine. You should also check your .config to be sure you
4186.1. Disabling MSI on a single device 350have enabled CONFIG_PCI_MSI.
419 351
420Under some circumstances it might be required to disable MSI on a 352Then, 'lspci -t' gives the list of bridges above a device. Reading
421single device. This may be achieved by either not calling pci_enable_msi() 353/sys/bus/pci/devices/*/msi_bus will tell you whether MSI are enabled (1)
422or all, or setting the pci_dev->no_msi flag before (most of the time 354or disabled (0). If 0 is found in any of the msi_bus files belonging
423in a quirk). 355to bridges between the PCI root and the device, MSIs are disabled.
424 356
4256.2. Disabling MSI below a bridge 357It is also worth checking the device driver to see whether it supports MSIs.
426 358For example, it may contain calls to pci_enable_msi(), pci_enable_msix() or
427The vast majority of MSI quirks are required by PCI bridges not 359pci_enable_msi_block().
428being able to route MSI between busses. In this case, MSI have to be
429disabled on all devices behind this bridge. It is achieved by setting
430the PCI_BUS_FLAGS_NO_MSI flag in the pci_bus->bus_flags of the bridge
431subordinate bus. There is no need to set the same flag on bridges that
432are below the broken bridge. When pci_enable_msi() is called to enable
433MSI on a device, pci_msi_supported() takes care of checking the NO_MSI
434flag in all parent busses of the device.
435
436Some bridges actually support dynamic MSI support enabling/disabling
437by changing some bits in their PCI configuration space (especially
438the Hypertransport chipsets such as the nVidia nForce and Serverworks
439HT2000). It may then be required to update the NO_MSI flag on the
440corresponding devices in the sysfs hierarchy. To enable MSI support
441on device "0000:00:0e", do:
442
443 echo 1 > /sys/bus/pci/devices/0000:00:0e/msi_bus
444
445To disable MSI support, echo 0 instead of 1. Note that it should be
446used with caution since changing this value might break interrupts.
447
4486.3. Disabling MSI globally
449
450Some extreme cases may require to disable MSI globally on the system.
451For now, the only known case is a Serverworks PCI-X chipsets (MSI are
452not supported on several busses that are not all connected to the
453chipset in the Linux PCI hierarchy). In the vast majority of other
454cases, disabling only behind a specific bridge is enough.
455
456For debugging purpose, the user may also pass pci=nomsi on the kernel
457command-line to explicitly disable MSI globally. But, once the appro-
458priate quirks are added to the kernel, this option should not be
459required anymore.
460
4616.4. Finding why MSI cannot be enabled on a device
462
463Assuming that MSI are not enabled on a device, you should look at
464dmesg to find messages that quirks may output when disabling MSI
465on some devices, some bridges or even globally.
466Then, lspci -t gives the list of bridges above a device. Reading
467/sys/bus/pci/devices/0000:00:0e/msi_bus will tell you whether MSI
468are enabled (1) or disabled (0). If 0 is found in a single bridge
469msi_bus file above the device, MSI cannot be enabled.
470
4717. FAQ
472
473Q1. Are there any limitations on using the MSI?
474
475A1. If the PCI device supports MSI and conforms to the
476specification and the platform supports the APIC local bus,
477then using MSI should work.
478
479Q2. Will it work on all the Pentium processors (P3, P4, Xeon,
480AMD processors)? In P3 IPI's are transmitted on the APIC local
481bus and in P4 and Xeon they are transmitted on the system
482bus. Are there any implications with this?
483
484A2. MSI support enables a PCI device sending an inbound
485memory write (0xfeexxxxx as target address) on its PCI bus
486directly to the FSB. Since the message address has a
487redirection hint bit cleared, it should work.
488
489Q3. The target address 0xfeexxxxx will be translated by the
490Host Bridge into an interrupt message. Are there any
491limitations on the chipsets such as Intel 8xx, Intel e7xxx,
492or VIA?
493
494A3. If these chipsets support an inbound memory write with
495target address set as 0xfeexxxxx, as conformed to PCI
496specification 2.3 or latest, then it should work.
497
498Q4. From the driver point of view, if the MSI is lost because
499of errors occurring during inbound memory write, then it may
500wait forever. Is there a mechanism for it to recover?
501
502A4. Since the target of the transaction is an inbound memory
503write, all transaction termination conditions (Retry,
504Master-Abort, Target-Abort, or normal completion) are
505supported. A device sending an MSI must abide by all the PCI
506rules and conditions regarding that inbound memory write. So,
507if a retry is signaled it must retry, etc... We believe that
508the recommendation for Abort is also a retry (refer to PCI
509specification 2.3 or latest).
diff --git a/Documentation/PCI/pci-iov-howto.txt b/Documentation/PCI/pci-iov-howto.txt
new file mode 100644
index 000000000000..fc73ef5d65b8
--- /dev/null
+++ b/Documentation/PCI/pci-iov-howto.txt
@@ -0,0 +1,99 @@
1 PCI Express I/O Virtualization Howto
2 Copyright (C) 2009 Intel Corporation
3 Yu Zhao <yu.zhao@intel.com>
4
5
61. Overview
7
81.1 What is SR-IOV
9
10Single Root I/O Virtualization (SR-IOV) is a PCI Express Extended
11capability which makes one physical device appear as multiple virtual
12devices. The physical device is referred to as Physical Function (PF)
13while the virtual devices are referred to as Virtual Functions (VF).
14Allocation of the VF can be dynamically controlled by the PF via
15registers encapsulated in the capability. By default, this feature is
16not enabled and the PF behaves as traditional PCIe device. Once it's
17turned on, each VF's PCI configuration space can be accessed by its own
18Bus, Device and Function Number (Routing ID). And each VF also has PCI
19Memory Space, which is used to map its register set. VF device driver
20operates on the register set so it can be functional and appear as a
21real existing PCI device.
22
232. User Guide
24
252.1 How can I enable SR-IOV capability
26
27The device driver (PF driver) will control the enabling and disabling
28of the capability via API provided by SR-IOV core. If the hardware
29has SR-IOV capability, loading its PF driver would enable it and all
30VFs associated with the PF.
31
322.2 How can I use the Virtual Functions
33
34The VFs are treated as hot-plugged PCI devices in the kernel, so they
 35should be able to work in the same way as real PCI devices. A VF
 36requires a device driver that is the same as a normal PCI device's.
37
383. Developer Guide
39
403.1 SR-IOV API
41
42To enable SR-IOV capability:
43 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
44 'nr_virtfn' is number of VFs to be enabled.
45
46To disable SR-IOV capability:
47 void pci_disable_sriov(struct pci_dev *dev);
48
49To notify SR-IOV core of Virtual Function Migration:
50 irqreturn_t pci_sriov_migration(struct pci_dev *dev);
51
523.2 Usage example
53
54Following piece of code illustrates the usage of the SR-IOV API.
55
56static int __devinit dev_probe(struct pci_dev *dev, const struct pci_device_id *id)
57{
58 pci_enable_sriov(dev, NR_VIRTFN);
59
60 ...
61
62 return 0;
63}
64
65static void __devexit dev_remove(struct pci_dev *dev)
66{
67 pci_disable_sriov(dev);
68
69 ...
70}
71
72static int dev_suspend(struct pci_dev *dev, pm_message_t state)
73{
74 ...
75
76 return 0;
77}
78
79static int dev_resume(struct pci_dev *dev)
80{
81 ...
82
83 return 0;
84}
85
86static void dev_shutdown(struct pci_dev *dev)
87{
88 ...
89}
90
91static struct pci_driver dev_driver = {
92 .name = "SR-IOV Physical Function driver",
93 .id_table = dev_id_table,
94 .probe = dev_probe,
95 .remove = __devexit_p(dev_remove),
96 .suspend = dev_suspend,
97 .resume = dev_resume,
98 .shutdown = dev_shutdown,
99};
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ea7d1bdad34d..d0f354670646 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -392,3 +392,35 @@ Why: The defines and typedefs (hw_interrupt_type, no_irq_type, irq_desc_t)
392 have been kept around for migration reasons. After more than two years 392 have been kept around for migration reasons. After more than two years
393 it's time to remove them finally 393 it's time to remove them finally
394Who: Thomas Gleixner <tglx@linutronix.de> 394Who: Thomas Gleixner <tglx@linutronix.de>
395
396---------------------------
397
398What: fakephp and associated sysfs files in /sys/bus/pci/slots/
399When: 2011
400Why: In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
401 represent a machine's physical PCI slots. The change in semantics
402 had userspace implications, as the hotplug core no longer allowed
403 drivers to create multiple sysfs files per physical slot (required
404 for multi-function devices, e.g.). fakephp was seen as a developer's
405 tool only, and its interface changed. Too late, we learned that
406 there were some users of the fakephp interface.
407
408 In 2.6.30, the original fakephp interface was restored. At the same
409 time, the PCI core gained the ability that fakephp provided, namely
410 function-level hot-remove and hot-add.
411
412 Since the PCI core now provides the same functionality, exposed in:
413
414 /sys/bus/pci/rescan
415 /sys/bus/pci/devices/.../remove
416 /sys/bus/pci/devices/.../rescan
417
418 there is no functional reason to maintain fakephp as well.
419
420 We will keep the existing module so that 'modprobe fakephp' will
421 present the old /sys/bus/pci/slots/... interface for compatibility,
422 but users are urged to migrate their applications to the API above.
423
424 After a reasonable transition period, we will remove the legacy
425 fakephp interface.
426Who: Alex Chiang <achiang@hp.com>
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt
index 9f8740ca3f3b..26e4b8bc53ee 100644
--- a/Documentation/filesystems/sysfs-pci.txt
+++ b/Documentation/filesystems/sysfs-pci.txt
@@ -12,6 +12,7 @@ that support it. For example, a given bus might look like this:
12 | |-- enable 12 | |-- enable
13 | |-- irq 13 | |-- irq
14 | |-- local_cpus 14 | |-- local_cpus
15 | |-- remove
15 | |-- resource 16 | |-- resource
16 | |-- resource0 17 | |-- resource0
17 | |-- resource1 18 | |-- resource1
@@ -36,6 +37,7 @@ files, each with their own function.
36 enable Whether the device is enabled (ascii, rw) 37 enable Whether the device is enabled (ascii, rw)
37 irq IRQ number (ascii, ro) 38 irq IRQ number (ascii, ro)
38 local_cpus nearby CPU mask (cpumask, ro) 39 local_cpus nearby CPU mask (cpumask, ro)
40 remove remove device from kernel's list (ascii, wo)
39 resource PCI resource host addresses (ascii, ro) 41 resource PCI resource host addresses (ascii, ro)
40 resource0..N PCI resource N, if present (binary, mmap) 42 resource0..N PCI resource N, if present (binary, mmap)
41 resource0_wc..N_wc PCI WC map resource N, if prefetchable (binary, mmap) 43 resource0_wc..N_wc PCI WC map resource N, if prefetchable (binary, mmap)
@@ -46,6 +48,7 @@ files, each with their own function.
46 48
47 ro - read only file 49 ro - read only file
48 rw - file is readable and writable 50 rw - file is readable and writable
51 wo - write only file
49 mmap - file is mmapable 52 mmap - file is mmapable
50 ascii - file contains ascii text 53 ascii - file contains ascii text
51 binary - file contains binary data 54 binary - file contains binary data
@@ -73,6 +76,13 @@ that the device must be enabled for a rom read to return data succesfully.
73In the event a driver is not bound to the device, it can be enabled using the 76In the event a driver is not bound to the device, it can be enabled using the
74'enable' file, documented above. 77'enable' file, documented above.
75 78
79The 'remove' file is used to remove the PCI device, by writing a non-zero
80integer to the file. This does not involve any kind of hot-plug functionality,
81e.g. powering off the device. The device is removed from the kernel's list of
82PCI devices, the sysfs directory for it is removed, and the device will be
83removed from any drivers attached to it. Removal of PCI root buses is
84disallowed.
85
76Accessing legacy resources through sysfs 86Accessing legacy resources through sysfs
77---------------------------------------- 87----------------------------------------
78 88
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index aeedb89a307a..240257dd4238 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1695,6 +1695,8 @@ and is between 256 and 4096 characters. It is defined in the file
1695 See also Documentation/blockdev/paride.txt. 1695 See also Documentation/blockdev/paride.txt.
1696 1696
1697 pci=option[,option...] [PCI] various PCI subsystem options: 1697 pci=option[,option...] [PCI] various PCI subsystem options:
1698 earlydump [X86] dump PCI config space before the kernel
1699 changes anything
1698 off [X86] don't probe for the PCI bus 1700 off [X86] don't probe for the PCI bus
1699 bios [X86-32] force use of PCI BIOS, don't access 1701 bios [X86-32] force use of PCI BIOS, don't access
1700 the hardware directly. Use this if your machine 1702 the hardware directly. Use this if your machine
@@ -1794,6 +1796,15 @@ and is between 256 and 4096 characters. It is defined in the file
1794 cbmemsize=nn[KMG] The fixed amount of bus space which is 1796 cbmemsize=nn[KMG] The fixed amount of bus space which is
1795 reserved for the CardBus bridge's memory 1797 reserved for the CardBus bridge's memory
1796 window. The default value is 64 megabytes. 1798 window. The default value is 64 megabytes.
1799 resource_alignment=
1800 Format:
1801 [<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
1802 Specifies alignment and device to reassign
1803 aligned memory resources.
1804 If <order of align> is not specified,
1805 PAGE_SIZE is used as alignment.
1806 PCI-PCI bridge can be specified, if resource
1807 windows need to be expanded.
1797 1808
1798 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power 1809 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
1799 Management. 1810 Management.
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index 2a14302c17a3..cb04eaa6ba33 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -273,4 +273,18 @@ struct pci_dev *alpha_gendev_to_pci(struct device *dev);
273 273
274extern struct pci_dev *isa_bridge; 274extern struct pci_dev *isa_bridge;
275 275
276extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
277 size_t count);
278extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
279 size_t count);
280extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
281 struct vm_area_struct *vma,
282 enum pci_mmap_state mmap_state);
283extern void pci_adjust_legacy_attr(struct pci_bus *bus,
284 enum pci_mmap_state mmap_type);
285#define HAVE_PCI_LEGACY 1
286
287extern int pci_create_resource_files(struct pci_dev *dev);
288extern void pci_remove_resource_files(struct pci_dev *dev);
289
276#endif /* __ALPHA_PCI_H */ 290#endif /* __ALPHA_PCI_H */
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index b4697759a123..a427538252f8 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -12,7 +12,7 @@ obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
12 12
13obj-$(CONFIG_VGA_HOSE) += console.o 13obj-$(CONFIG_VGA_HOSE) += console.o
14obj-$(CONFIG_SMP) += smp.o 14obj-$(CONFIG_SMP) += smp.o
15obj-$(CONFIG_PCI) += pci.o pci_iommu.o 15obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o
16obj-$(CONFIG_SRM_ENV) += srm_env.o 16obj-$(CONFIG_SRM_ENV) += srm_env.o
17obj-$(CONFIG_MODULES) += module.o 17obj-$(CONFIG_MODULES) += module.o
18 18
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
new file mode 100644
index 000000000000..6ea822e7f724
--- /dev/null
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -0,0 +1,366 @@
1/*
2 * arch/alpha/kernel/pci-sysfs.c
3 *
4 * Copyright (C) 2009 Ivan Kokshaysky
5 *
6 * Alpha PCI resource files.
7 *
8 * Loosely based on generic HAVE_PCI_MMAP implementation in
9 * drivers/pci/pci-sysfs.c
10 */
11
12#include <linux/sched.h>
13#include <linux/pci.h>
14
/*
 * hose_mmap_page_range - install a user mapping of hose address space
 * @hose: PCI controller ("hose") whose bus space is being mapped
 * @vma: user VMA to populate; vm_pgoff holds the bus-space page offset
 * @mmap_type: pci_mmap_mem or pci_mmap_io
 * @sparse: non-zero to map through sparse space, zero for dense space
 *
 * Returns the result of io_remap_pfn_range() (0 on success).
 */
static int hose_mmap_page_range(struct pci_controller *hose,
				struct vm_area_struct *vma,
				enum pci_mmap_state mmap_type, int sparse)
{
	unsigned long base;

	/* Pick the physical base: memory vs. I/O, sparse vs. dense. */
	if (mmap_type == pci_mmap_mem)
		base = sparse ? hose->sparse_mem_base : hose->dense_mem_base;
	else
		base = sparse ? hose->sparse_io_base : hose->dense_io_base;

	/* vm_pgoff already carries the offset within the space; add its base. */
	vma->vm_pgoff += base >> PAGE_SHIFT;
	vma->vm_flags |= (VM_IO | VM_RESERVED);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
33
34static int __pci_mmap_fits(struct pci_dev *pdev, int num,
35 struct vm_area_struct *vma, int sparse)
36{
37 unsigned long nr, start, size;
38 int shift = sparse ? 5 : 0;
39
40 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
41 start = vma->vm_pgoff;
42 size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1;
43
44 if (start < size && size - start >= nr)
45 return 1;
46 WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d "
47 "(size 0x%08lx)\n",
48 current->comm, sparse ? " sparse" : "", start, start + nr,
49 pci_name(pdev), num, size);
50 return 0;
51}
52
/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @sparse: address space type
 *
 * Use the bus mapping routines to map a PCI resource into userspace.
 * Returns 0 on success, -ENODEV if the attribute does not match one of
 * the device's BARs, or -EINVAL if the mapping is out of range or the
 * region is exclusively claimed.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int sparse)
{
	struct pci_dev *pdev = to_pci_dev(container_of(kobj,
						       struct device, kobj));
	struct resource *res = (struct resource *)attr->private;
	enum pci_mmap_state mmap_type;
	struct pci_bus_region bar;
	int i;

	/* Find which BAR this attribute describes. */
	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if (res == &pdev->resource[i])
			break;
	if (i >= PCI_ROM_RESOURCE)
		return -ENODEV;

	/* Refuse mappings that extend past the end of the BAR. */
	if (!__pci_mmap_fits(pdev, i, vma, sparse))
		return -EINVAL;

	/* Don't map regions exclusively claimed by a driver. */
	if (iomem_is_exclusive(res->start))
		return -EINVAL;

	/* Convert to a bus address; sparse space is 32x expanded. */
	pcibios_resource_to_bus(pdev, &bar, res);
	vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0));
	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
}
90
/* mmap callback for "resourceN_sparse" files: map via sparse space. */
static int pci_mmap_resource_sparse(struct kobject *kobj,
				    struct bin_attribute *attr,
				    struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}
97
/* mmap callback for "resourceN"/"resourceN_dense" files: dense space. */
static int pci_mmap_resource_dense(struct kobject *kobj,
				   struct bin_attribute *attr,
				   struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}
104
105/**
106 * pci_remove_resource_files - cleanup resource files
107 * @dev: dev to cleanup
108 *
109 * If we created resource files for @dev, remove them from sysfs and
110 * free their resources.
111 */
112void pci_remove_resource_files(struct pci_dev *pdev)
113{
114 int i;
115
116 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
117 struct bin_attribute *res_attr;
118
119 res_attr = pdev->res_attr[i];
120 if (res_attr) {
121 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
122 kfree(res_attr);
123 }
124
125 res_attr = pdev->res_attr_wc[i];
126 if (res_attr) {
127 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
128 kfree(res_attr);
129 }
130 }
131}
132
/*
 * Can BAR @num of @pdev be reached through sparse memory space?
 * Sparse space only covers the low part of the bus memory range, so a
 * BAR assigned above it can be exposed through a dense mapping only.
 */
static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num)
{
	struct pci_bus_region bar;
	struct pci_controller *hose = pdev->sysdata;
	long dense_offset;
	unsigned long sparse_size;

	pcibios_resource_to_bus(pdev, &bar, &pdev->resource[num]);

	/* All core logic chips have 4G sparse address space, except
	   CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM
	   definitions in asm/core_xxx.h files). This corresponds
	   to 128M or 512M of the bus space. */
	dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base);
	sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000;

	/* Fits only if the BAR ends below the sparse-reachable limit. */
	return bar.end < sparse_size;
}
151
/*
 * pci_create_one_attr - fill in and register one resource file
 * @pdev: device the BAR belongs to
 * @num: BAR number
 * @name: caller-owned buffer the attribute name is formatted into
 * @suffix: "", "_sparse" or "_dense"
 * @res_attr: bin_attribute to initialize
 * @sparse: non-zero to create a sparse-space mapping file
 *
 * A sparse file advertises 32x the BAR length because each byte of the
 * BAR occupies 32 bytes of sparse space.  Returns the result of
 * sysfs_create_bin_file() (0 on success).
 */
static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name,
			       char *suffix, struct bin_attribute *res_attr,
			       unsigned long sparse)
{
	size_t size = pci_resource_len(pdev, num);

	sprintf(name, "resource%d%s", num, suffix);
	res_attr->mmap = sparse ? pci_mmap_resource_sparse :
				  pci_mmap_resource_dense;
	res_attr->attr.name = name;
	res_attr->attr.mode = S_IRUSR | S_IWUSR;
	res_attr->size = sparse ? size << 5 : size;
	res_attr->private = &pdev->resource[num];
	return sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
}
167
/*
 * pci_create_attr - create the sysfs resource file(s) for one BAR
 * @pdev: device in question
 * @num: BAR number
 *
 * Creates "resourceN" (bwx machines), or "resourceN_sparse" and/or
 * "resourceN_dense" depending on which address spaces the hose provides
 * and whether the BAR is reachable through sparse space.  The attribute
 * structure(s) and their name strings share a single allocation.
 */
static int pci_create_attr(struct pci_dev *pdev, int num)
{
	/* allocate attribute structure, piggyback attribute name */
	int retval, nlen1, nlen2 = 0, res_count = 1;
	unsigned long sparse_base, dense_base;
	struct bin_attribute *attr;
	struct pci_controller *hose = pdev->sysdata;
	char *suffix, *attr_name;

	suffix = "";	/* Assume bwx machine, normal resourceN files. */
	nlen1 = 10;	/* "resourceN" + NUL */

	if (pdev->resource[num].flags & IORESOURCE_MEM) {
		sparse_base = hose->sparse_mem_base;
		dense_base = hose->dense_mem_base;
		if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) {
			/* BAR lies beyond sparse space: dense file only. */
			sparse_base = 0;
			suffix = "_dense";
			nlen1 = 16; /* resourceN_dense */
		}
	} else {
		sparse_base = hose->sparse_io_base;
		dense_base = hose->dense_io_base;
	}

	if (sparse_base) {
		suffix = "_sparse";
		nlen1 = 17;	/* "resourceN_sparse" + NUL */
		if (dense_base) {
			nlen2 = 16; /* resourceN_dense */
			res_count = 2;	/* sparse AND dense files */
		}
	}

	/* One chunk: res_count attrs followed by their name strings.
	   NOTE(review): GFP_ATOMIC looks stronger than needed here --
	   presumably this runs in sleepable context; confirm before
	   relaxing to GFP_KERNEL. */
	attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC);
	if (!attr)
		return -ENOMEM;

	/* Create bwx, sparse or single dense file */
	attr_name = (char *)(attr + res_count);	/* names live after attrs */
	pdev->res_attr[num] = attr;
	retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr,
				     sparse_base);
	if (retval || res_count == 1)
		return retval;

	/* Create dense file */
	attr_name += nlen1;	/* second name slot */
	attr++;			/* second attr in the same chunk */
	pdev->res_attr_wc[num] = attr;
	return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0);
}
220
221/**
222 * pci_create_resource_files - create resource files in sysfs for @dev
223 * @dev: dev in question
224 *
225 * Walk the resources in @dev creating files for each resource available.
226 */
227int pci_create_resource_files(struct pci_dev *pdev)
228{
229 int i;
230 int retval;
231
232 /* Expose the PCI resources from this device as files */
233 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
234
235 /* skip empty resources */
236 if (!pci_resource_len(pdev, i))
237 continue;
238
239 retval = pci_create_attr(pdev, i);
240 if (retval) {
241 pci_remove_resource_files(pdev);
242 return retval;
243 }
244 }
245 return 0;
246}
247
248/* Legacy I/O bus mapping stuff. */
249
250static int __legacy_mmap_fits(struct pci_controller *hose,
251 struct vm_area_struct *vma,
252 unsigned long res_size, int sparse)
253{
254 unsigned long nr, start, size;
255
256 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
257 start = vma->vm_pgoff;
258 size = ((res_size - 1) >> PAGE_SHIFT) + 1;
259
260 if (start < size && size - start >= nr)
261 return 1;
262 WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %d "
263 "(size 0x%08lx)\n",
264 current->comm, sparse ? " sparse" : "", start, start + nr,
265 hose->index, size);
266 return 0;
267}
268
269static inline int has_sparse(struct pci_controller *hose,
270 enum pci_mmap_state mmap_type)
271{
272 unsigned long base;
273
274 base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base :
275 hose->sparse_io_base;
276
277 return base != 0;
278}
279
280int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
281 enum pci_mmap_state mmap_type)
282{
283 struct pci_controller *hose = bus->sysdata;
284 int sparse = has_sparse(hose, mmap_type);
285 unsigned long res_size;
286
287 res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size :
288 bus->legacy_io->size;
289 if (!__legacy_mmap_fits(hose, vma, res_size, sparse))
290 return -EINVAL;
291
292 return hose_mmap_page_range(hose, vma, mmap_type, sparse);
293}
294
295/**
296 * pci_adjust_legacy_attr - adjustment of legacy file attributes
297 * @b: bus to create files under
298 * @mmap_type: I/O port or memory
299 *
300 * Adjust file name and size for sparse mappings.
301 */
302void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
303{
304 struct pci_controller *hose = bus->sysdata;
305
306 if (!has_sparse(hose, mmap_type))
307 return;
308
309 if (mmap_type == pci_mmap_mem) {
310 bus->legacy_mem->attr.name = "legacy_mem_sparse";
311 bus->legacy_mem->size <<= 5;
312 } else {
313 bus->legacy_io->attr.name = "legacy_io_sparse";
314 bus->legacy_io->size <<= 5;
315 }
316 return;
317}
318
319/* Legacy I/O bus read/write functions */
320int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
321{
322 struct pci_controller *hose = bus->sysdata;
323
324 port += hose->io_space->start;
325
326 switch(size) {
327 case 1:
328 *((u8 *)val) = inb(port);
329 return 1;
330 case 2:
331 if (port & 1)
332 return -EINVAL;
333 *((u16 *)val) = inw(port);
334 return 2;
335 case 4:
336 if (port & 3)
337 return -EINVAL;
338 *((u32 *)val) = inl(port);
339 return 4;
340 }
341 return -EINVAL;
342}
343
344int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
345{
346 struct pci_controller *hose = bus->sysdata;
347
348 port += hose->io_space->start;
349
350 switch(size) {
351 case 1:
352 outb(port, val);
353 return 1;
354 case 2:
355 if (port & 1)
356 return -EINVAL;
357 outw(port, val);
358 return 2;
359 case 4:
360 if (port & 3)
361 return -EINVAL;
362 outl(port, val);
363 return 4;
364 }
365 return -EINVAL;
366}
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 3548159a1beb..ba17d5d90a49 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -114,6 +114,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
114/* Decide whether to display the domain number in /proc */ 114/* Decide whether to display the domain number in /proc */
115extern int pci_proc_domain(struct pci_bus *bus); 115extern int pci_proc_domain(struct pci_bus *bus);
116 116
117/* MSI arch hooks */
118#define arch_setup_msi_irqs arch_setup_msi_irqs
119#define arch_teardown_msi_irqs arch_teardown_msi_irqs
120#define arch_msi_check_device arch_msi_check_device
117 121
118struct vm_area_struct; 122struct vm_area_struct;
119/* Map a range of PCI memory or I/O space for a device into user space */ 123/* Map a range of PCI memory or I/O space for a device into user space */
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index 3bb7d3dd28be..8bbc12d20f5c 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/msi.h> 11#include <linux/msi.h>
12#include <linux/pci.h>
12 13
13#include <asm/machdep.h> 14#include <asm/machdep.h>
14 15
@@ -19,6 +20,10 @@ int arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
19 return -ENOSYS; 20 return -ENOSYS;
20 } 21 }
21 22
23 /* PowerPC doesn't support multiple MSI yet */
24 if (type == PCI_CAP_ID_MSI && nvec > 1)
25 return 1;
26
22 if (ppc_md.msi_check_device) { 27 if (ppc_md.msi_check_device) {
23 pr_debug("msi: Using platform check routine.\n"); 28 pr_debug("msi: Using platform check routine.\n");
24 return ppc_md.msi_check_device(dev, nvec, type); 29 return ppc_md.msi_check_device(dev, nvec, type);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index a977de23cb4d..a0301bfeb954 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -86,6 +86,9 @@ static inline void early_quirks(void) { }
86 86
87extern void pci_iommu_alloc(void); 87extern void pci_iommu_alloc(void);
88 88
89/* MSI arch hook */
90#define arch_setup_msi_irqs arch_setup_msi_irqs
91
89#endif /* __KERNEL__ */ 92#endif /* __KERNEL__ */
90 93
91#ifdef CONFIG_X86_32 94#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index da99ffcdfde6..1bb5c6cee3eb 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3468,6 +3468,10 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3468 struct intel_iommu *iommu = NULL; 3468 struct intel_iommu *iommu = NULL;
3469 int index = 0; 3469 int index = 0;
3470 3470
3471 /* x86 doesn't support multiple MSI yet */
3472 if (type == PCI_CAP_ID_MSI && nvec > 1)
3473 return 1;
3474
3471 irq_want = nr_irqs_gsi; 3475 irq_want = nr_irqs_gsi;
3472 sub_handle = 0; 3476 sub_handle = 0;
3473 list_for_each_entry(msidesc, &dev->msi_list, list) { 3477 list_for_each_entry(msidesc, &dev->msi_list, list) {
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c7c4776ff630..90f5b9ef5def 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -300,8 +300,7 @@ fs_initcall(pci_iommu_init);
300static __devinit void via_no_dac(struct pci_dev *dev) 300static __devinit void via_no_dac(struct pci_dev *dev)
301{ 301{
302 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { 302 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
303 printk(KERN_INFO 303 dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
304 "PCI: VIA PCI bridge detected. Disabling DAC.\n");
305 forbid_dac = 1; 304 forbid_dac = 1;
306 } 305 }
307} 306}
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index f6adf2c6d751..aaf26ae58cd5 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -69,11 +69,12 @@ void early_dump_pci_device(u8 bus, u8 slot, u8 func)
69 int j; 69 int j;
70 u32 val; 70 u32 val;
71 71
72 printk(KERN_INFO "PCI: %02x:%02x:%02x", bus, slot, func); 72 printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:",
73 bus, slot, func);
73 74
74 for (i = 0; i < 256; i += 4) { 75 for (i = 0; i < 256; i += 4) {
75 if (!(i & 0x0f)) 76 if (!(i & 0x0f))
76 printk("\n%04x:",i); 77 printk("\n %02x:",i);
77 78
78 val = read_pci_config(bus, slot, func, i); 79 val = read_pci_config(bus, slot, func, i);
79 for (j = 0; j < 4; j++) { 80 for (j = 0; j < 4; j++) {
@@ -96,20 +97,22 @@ void early_dump_pci_devices(void)
96 for (func = 0; func < 8; func++) { 97 for (func = 0; func < 8; func++) {
97 u32 class; 98 u32 class;
98 u8 type; 99 u8 type;
100
99 class = read_pci_config(bus, slot, func, 101 class = read_pci_config(bus, slot, func,
100 PCI_CLASS_REVISION); 102 PCI_CLASS_REVISION);
101 if (class == 0xffffffff) 103 if (class == 0xffffffff)
102 break; 104 continue;
103 105
104 early_dump_pci_device(bus, slot, func); 106 early_dump_pci_device(bus, slot, func);
105 107
106 /* No multi-function device? */ 108 if (func == 0) {
107 type = read_pci_config_byte(bus, slot, func, 109 type = read_pci_config_byte(bus, slot,
110 func,
108 PCI_HEADER_TYPE); 111 PCI_HEADER_TYPE);
109 if (!(type & 0x80)) 112 if (!(type & 0x80))
110 break; 113 break;
114 }
111 } 115 }
112 } 116 }
113 } 117 }
114} 118}
115
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 9c49919e4d1c..6dd89555fbfa 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
495 pci_siemens_interrupt_controller); 495 pci_siemens_interrupt_controller);
496 496
497/* 497/*
498 * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
499 * 4096 bytes configuration space for each function of their processor
500 * configuration space.
501 */
502static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
503{
504 dev->cfg_size = pci_cfg_space_size_ext(dev);
505}
506DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
507DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
508DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
509DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
516
517/*
518 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from 498 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
519 * confusing the PCI engine: 499 * confusing the PCI engine:
520 */ 500 */
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index f1065b129e9c..4061bb0f267d 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -50,8 +50,6 @@ static int __init pci_legacy_init(void)
50 if (pci_root_bus) 50 if (pci_root_bus)
51 pci_bus_add_devices(pci_root_bus); 51 pci_bus_add_devices(pci_root_bus);
52 52
53 pcibios_fixup_peer_bridges();
54
55 return 0; 53 return 0;
56} 54}
57 55
@@ -67,6 +65,7 @@ int __init pci_subsys_init(void)
67 pci_visws_init(); 65 pci_visws_init();
68#endif 66#endif
69 pci_legacy_init(); 67 pci_legacy_init();
68 pcibios_fixup_peer_bridges();
70 pcibios_irq_init(); 69 pcibios_irq_init();
71 pcibios_init(); 70 pcibios_init();
72 71
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 89bf9242c80a..905bb526b133 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <linux/bitmap.h> 16#include <linux/bitmap.h>
17#include <linux/sort.h>
17#include <asm/e820.h> 18#include <asm/e820.h>
18#include <asm/pci_x86.h> 19#include <asm/pci_x86.h>
19 20
@@ -24,24 +25,49 @@
24/* Indicate if the mmcfg resources have been placed into the resource table. */ 25/* Indicate if the mmcfg resources have been placed into the resource table. */
25static int __initdata pci_mmcfg_resources_inserted; 26static int __initdata pci_mmcfg_resources_inserted;
26 27
28static __init int extend_mmcfg(int num)
29{
30 struct acpi_mcfg_allocation *new;
31 int new_num = pci_mmcfg_config_num + num;
32
33 new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL);
34 if (!new)
35 return -1;
36
37 if (pci_mmcfg_config) {
38 memcpy(new, pci_mmcfg_config,
39 sizeof(pci_mmcfg_config[0]) * new_num);
40 kfree(pci_mmcfg_config);
41 }
42 pci_mmcfg_config = new;
43
44 return 0;
45}
46
47static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end)
48{
49 int i = pci_mmcfg_config_num;
50
51 pci_mmcfg_config_num++;
52 pci_mmcfg_config[i].address = addr;
53 pci_mmcfg_config[i].pci_segment = segment;
54 pci_mmcfg_config[i].start_bus_number = start;
55 pci_mmcfg_config[i].end_bus_number = end;
56}
57
27static const char __init *pci_mmcfg_e7520(void) 58static const char __init *pci_mmcfg_e7520(void)
28{ 59{
29 u32 win; 60 u32 win;
30 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); 61 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
31 62
32 win = win & 0xf000; 63 win = win & 0xf000;
33 if(win == 0x0000 || win == 0xf000) 64 if (win == 0x0000 || win == 0xf000)
34 pci_mmcfg_config_num = 0; 65 return NULL;
35 else { 66
36 pci_mmcfg_config_num = 1; 67 if (extend_mmcfg(1) == -1)
37 pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); 68 return NULL;
38 if (!pci_mmcfg_config) 69
39 return NULL; 70 fill_one_mmcfg(win << 16, 0, 0, 255);
40 pci_mmcfg_config[0].address = win << 16;
41 pci_mmcfg_config[0].pci_segment = 0;
42 pci_mmcfg_config[0].start_bus_number = 0;
43 pci_mmcfg_config[0].end_bus_number = 255;
44 }
45 71
46 return "Intel Corporation E7520 Memory Controller Hub"; 72 return "Intel Corporation E7520 Memory Controller Hub";
47} 73}
@@ -50,13 +76,11 @@ static const char __init *pci_mmcfg_intel_945(void)
50{ 76{
51 u32 pciexbar, mask = 0, len = 0; 77 u32 pciexbar, mask = 0, len = 0;
52 78
53 pci_mmcfg_config_num = 1;
54
55 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); 79 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);
56 80
57 /* Enable bit */ 81 /* Enable bit */
58 if (!(pciexbar & 1)) 82 if (!(pciexbar & 1))
59 pci_mmcfg_config_num = 0; 83 return NULL;
60 84
61 /* Size bits */ 85 /* Size bits */
62 switch ((pciexbar >> 1) & 3) { 86 switch ((pciexbar >> 1) & 3) {
@@ -73,28 +97,23 @@ static const char __init *pci_mmcfg_intel_945(void)
73 len = 0x04000000U; 97 len = 0x04000000U;
74 break; 98 break;
75 default: 99 default:
76 pci_mmcfg_config_num = 0; 100 return NULL;
77 } 101 }
78 102
79 /* Errata #2, things break when not aligned on a 256Mb boundary */ 103 /* Errata #2, things break when not aligned on a 256Mb boundary */
80 /* Can only happen in 64M/128M mode */ 104 /* Can only happen in 64M/128M mode */
81 105
82 if ((pciexbar & mask) & 0x0fffffffU) 106 if ((pciexbar & mask) & 0x0fffffffU)
83 pci_mmcfg_config_num = 0; 107 return NULL;
84 108
85 /* Don't hit the APIC registers and their friends */ 109 /* Don't hit the APIC registers and their friends */
86 if ((pciexbar & mask) >= 0xf0000000U) 110 if ((pciexbar & mask) >= 0xf0000000U)
87 pci_mmcfg_config_num = 0; 111 return NULL;
88 112
89 if (pci_mmcfg_config_num) { 113 if (extend_mmcfg(1) == -1)
90 pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); 114 return NULL;
91 if (!pci_mmcfg_config) 115
92 return NULL; 116 fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);
93 pci_mmcfg_config[0].address = pciexbar & mask;
94 pci_mmcfg_config[0].pci_segment = 0;
95 pci_mmcfg_config[0].start_bus_number = 0;
96 pci_mmcfg_config[0].end_bus_number = (len >> 20) - 1;
97 }
98 117
99 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; 118 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
100} 119}
@@ -138,22 +157,77 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
138 busnbits = 8; 157 busnbits = 8;
139 } 158 }
140 159
141 pci_mmcfg_config_num = (1 << segnbits); 160 if (extend_mmcfg(1 << segnbits) == -1)
142 pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]) *
143 pci_mmcfg_config_num, GFP_KERNEL);
144 if (!pci_mmcfg_config)
145 return NULL; 161 return NULL;
146 162
147 for (i = 0; i < (1 << segnbits); i++) { 163 for (i = 0; i < (1 << segnbits); i++)
148 pci_mmcfg_config[i].address = base + (1<<28) * i; 164 fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1);
149 pci_mmcfg_config[i].pci_segment = i;
150 pci_mmcfg_config[i].start_bus_number = 0;
151 pci_mmcfg_config[i].end_bus_number = (1 << busnbits) - 1;
152 }
153 165
154 return "AMD Family 10h NB"; 166 return "AMD Family 10h NB";
155} 167}
156 168
169static bool __initdata mcp55_checked;
170static const char __init *pci_mmcfg_nvidia_mcp55(void)
171{
172 int bus;
173 int mcp55_mmconf_found = 0;
174
175 static const u32 extcfg_regnum = 0x90;
176 static const u32 extcfg_regsize = 4;
177 static const u32 extcfg_enable_mask = 1<<31;
178 static const u32 extcfg_start_mask = 0xff<<16;
179 static const int extcfg_start_shift = 16;
180 static const u32 extcfg_size_mask = 0x3<<28;
181 static const int extcfg_size_shift = 28;
182 static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20};
183 static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff};
184 static const int extcfg_base_lshift = 25;
185
186 /*
187 * do check if amd fam10h already took over
188 */
189 if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked)
190 return NULL;
191
192 mcp55_checked = true;
193 for (bus = 0; bus < 256; bus++) {
194 u64 base;
195 u32 l, extcfg;
196 u16 vendor, device;
197 int start, size_index, end;
198
199 raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
200 vendor = l & 0xffff;
201 device = (l >> 16) & 0xffff;
202
203 if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
204 continue;
205
206 raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
207 extcfg_regsize, &extcfg);
208
209 if (!(extcfg & extcfg_enable_mask))
210 continue;
211
212 if (extend_mmcfg(1) == -1)
213 continue;
214
215 size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
216 base = extcfg & extcfg_base_mask[size_index];
217 /* base could > 4G */
218 base <<= extcfg_base_lshift;
219 start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
220 end = start + extcfg_sizebus[size_index] - 1;
221 fill_one_mmcfg(base, 0, start, end);
222 mcp55_mmconf_found++;
223 }
224
225 if (!mcp55_mmconf_found)
226 return NULL;
227
228 return "nVidia MCP55";
229}
230
157struct pci_mmcfg_hostbridge_probe { 231struct pci_mmcfg_hostbridge_probe {
158 u32 bus; 232 u32 bus;
159 u32 devfn; 233 u32 devfn;
@@ -171,8 +245,52 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
171 0x1200, pci_mmcfg_amd_fam10h }, 245 0x1200, pci_mmcfg_amd_fam10h },
172 { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, 246 { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
173 0x1200, pci_mmcfg_amd_fam10h }, 247 0x1200, pci_mmcfg_amd_fam10h },
248 { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
249 0x0369, pci_mmcfg_nvidia_mcp55 },
174}; 250};
175 251
252static int __init cmp_mmcfg(const void *x1, const void *x2)
253{
254 const typeof(pci_mmcfg_config[0]) *m1 = x1;
255 const typeof(pci_mmcfg_config[0]) *m2 = x2;
256 int start1, start2;
257
258 start1 = m1->start_bus_number;
259 start2 = m2->start_bus_number;
260
261 return start1 - start2;
262}
263
264static void __init pci_mmcfg_check_end_bus_number(void)
265{
266 int i;
267 typeof(pci_mmcfg_config[0]) *cfg, *cfgx;
268
269 /* sort them at first */
270 sort(pci_mmcfg_config, pci_mmcfg_config_num,
271 sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
272
273 /* last one*/
274 if (pci_mmcfg_config_num > 0) {
275 i = pci_mmcfg_config_num - 1;
276 cfg = &pci_mmcfg_config[i];
277 if (cfg->end_bus_number < cfg->start_bus_number)
278 cfg->end_bus_number = 255;
279 }
280
281 /* don't overlap please */
282 for (i = 0; i < pci_mmcfg_config_num - 1; i++) {
283 cfg = &pci_mmcfg_config[i];
284 cfgx = &pci_mmcfg_config[i+1];
285
286 if (cfg->end_bus_number < cfg->start_bus_number)
287 cfg->end_bus_number = 255;
288
289 if (cfg->end_bus_number >= cfgx->start_bus_number)
290 cfg->end_bus_number = cfgx->start_bus_number - 1;
291 }
292}
293
176static int __init pci_mmcfg_check_hostbridge(void) 294static int __init pci_mmcfg_check_hostbridge(void)
177{ 295{
178 u32 l; 296 u32 l;
@@ -186,31 +304,33 @@ static int __init pci_mmcfg_check_hostbridge(void)
186 304
187 pci_mmcfg_config_num = 0; 305 pci_mmcfg_config_num = 0;
188 pci_mmcfg_config = NULL; 306 pci_mmcfg_config = NULL;
189 name = NULL;
190 307
191 for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) { 308 for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
192 bus = pci_mmcfg_probes[i].bus; 309 bus = pci_mmcfg_probes[i].bus;
193 devfn = pci_mmcfg_probes[i].devfn; 310 devfn = pci_mmcfg_probes[i].devfn;
194 raw_pci_ops->read(0, bus, devfn, 0, 4, &l); 311 raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
195 vendor = l & 0xffff; 312 vendor = l & 0xffff;
196 device = (l >> 16) & 0xffff; 313 device = (l >> 16) & 0xffff;
197 314
315 name = NULL;
198 if (pci_mmcfg_probes[i].vendor == vendor && 316 if (pci_mmcfg_probes[i].vendor == vendor &&
199 pci_mmcfg_probes[i].device == device) 317 pci_mmcfg_probes[i].device == device)
200 name = pci_mmcfg_probes[i].probe(); 318 name = pci_mmcfg_probes[i].probe();
201 }
202 319
203 if (name) { 320 if (name)
204 printk(KERN_INFO "PCI: Found %s %s MMCONFIG support.\n", 321 printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n",
205 name, pci_mmcfg_config_num ? "with" : "without"); 322 name);
206 } 323 }
207 324
208 return name != NULL; 325 /* some end_bus_number is crazy, fix it */
326 pci_mmcfg_check_end_bus_number();
327
328 return pci_mmcfg_config_num != 0;
209} 329}
210 330
211static void __init pci_mmcfg_insert_resources(void) 331static void __init pci_mmcfg_insert_resources(void)
212{ 332{
213#define PCI_MMCFG_RESOURCE_NAME_LEN 19 333#define PCI_MMCFG_RESOURCE_NAME_LEN 24
214 int i; 334 int i;
215 struct resource *res; 335 struct resource *res;
216 char *names; 336 char *names;
@@ -228,9 +348,10 @@ static void __init pci_mmcfg_insert_resources(void)
228 struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i]; 348 struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
229 num_buses = cfg->end_bus_number - cfg->start_bus_number + 1; 349 num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
230 res->name = names; 350 res->name = names;
231 snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", 351 snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
232 cfg->pci_segment); 352 "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
233 res->start = cfg->address; 353 cfg->start_bus_number, cfg->end_bus_number);
354 res->start = cfg->address + (cfg->start_bus_number << 20);
234 res->end = res->start + (num_buses << 20) - 1; 355 res->end = res->start + (num_buses << 20) - 1;
235 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 356 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
236 insert_resource(&iomem_resource, res); 357 insert_resource(&iomem_resource, res);
@@ -354,8 +475,6 @@ static void __init pci_mmcfg_reject_broken(int early)
354 (pci_mmcfg_config[0].address == 0)) 475 (pci_mmcfg_config[0].address == 0))
355 return; 476 return;
356 477
357 cfg = &pci_mmcfg_config[0];
358
359 for (i = 0; i < pci_mmcfg_config_num; i++) { 478 for (i = 0; i < pci_mmcfg_config_num; i++) {
360 int valid = 0; 479 int valid = 0;
361 u64 addr, size; 480 u64 addr, size;
@@ -423,10 +542,10 @@ static void __init __pci_mmcfg_init(int early)
423 known_bridge = 1; 542 known_bridge = 1;
424 } 543 }
425 544
426 if (!known_bridge) { 545 if (!known_bridge)
427 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); 546 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
428 pci_mmcfg_reject_broken(early); 547
429 } 548 pci_mmcfg_reject_broken(early);
430 549
431 if ((pci_mmcfg_config_num == 0) || 550 if ((pci_mmcfg_config_num == 0) ||
432 (pci_mmcfg_config == NULL) || 551 (pci_mmcfg_config == NULL) ||
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 30007ffc8e11..94349f8b2f96 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -112,13 +112,18 @@ static struct pci_raw_ops pci_mmcfg = {
112static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) 112static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
113{ 113{
114 void __iomem *addr; 114 void __iomem *addr;
115 u32 size; 115 u64 start, size;
116 116
117 size = (cfg->end_bus_number + 1) << 20; 117 start = cfg->start_bus_number;
118 addr = ioremap_nocache(cfg->address, size); 118 start <<= 20;
119 start += cfg->address;
120 size = cfg->end_bus_number + 1 - cfg->start_bus_number;
121 size <<= 20;
122 addr = ioremap_nocache(start, size);
119 if (addr) { 123 if (addr) {
120 printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", 124 printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
121 cfg->address, cfg->address + size - 1); 125 start, start + size - 1);
126 addr -= cfg->start_bus_number << 20;
122 } 127 }
123 return addr; 128 return addr;
124} 129}
@@ -157,7 +162,7 @@ void __init pci_mmcfg_arch_free(void)
157 162
158 for (i = 0; i < pci_mmcfg_config_num; ++i) { 163 for (i = 0; i < pci_mmcfg_config_num; ++i) {
159 if (pci_mmcfg_virt[i].virt) { 164 if (pci_mmcfg_virt[i].virt) {
160 iounmap(pci_mmcfg_virt[i].virt); 165 iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20));
161 pci_mmcfg_virt[i].virt = NULL; 166 pci_mmcfg_virt[i].virt = NULL;
162 pci_mmcfg_virt[i].cfg = NULL; 167 pci_mmcfg_virt[i].cfg = NULL;
163 } 168 }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 5b38a026d122..196f97d00956 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -66,11 +66,18 @@ struct acpi_pci_root {
66 struct acpi_device * device; 66 struct acpi_device * device;
67 struct acpi_pci_id id; 67 struct acpi_pci_id id;
68 struct pci_bus *bus; 68 struct pci_bus *bus;
69
70 u32 osc_support_set; /* _OSC state of support bits */
71 u32 osc_control_set; /* _OSC state of control bits */
72 u32 osc_control_qry; /* the latest _OSC query result */
73
74 u32 osc_queried:1; /* has _OSC control been queried? */
69}; 75};
70 76
71static LIST_HEAD(acpi_pci_roots); 77static LIST_HEAD(acpi_pci_roots);
72 78
73static struct acpi_pci_driver *sub_driver; 79static struct acpi_pci_driver *sub_driver;
80static DEFINE_MUTEX(osc_lock);
74 81
75int acpi_pci_register_driver(struct acpi_pci_driver *driver) 82int acpi_pci_register_driver(struct acpi_pci_driver *driver)
76{ 83{
@@ -185,6 +192,175 @@ static void acpi_pci_bridge_scan(struct acpi_device *device)
185 } 192 }
186} 193}
187 194
195static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
196 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
197
198static acpi_status acpi_pci_run_osc(acpi_handle handle,
199 const u32 *capbuf, u32 *retval)
200{
201 acpi_status status;
202 struct acpi_object_list input;
203 union acpi_object in_params[4];
204 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
205 union acpi_object *out_obj;
206 u32 errors;
207
208 /* Setting up input parameters */
209 input.count = 4;
210 input.pointer = in_params;
211 in_params[0].type = ACPI_TYPE_BUFFER;
212 in_params[0].buffer.length = 16;
213 in_params[0].buffer.pointer = OSC_UUID;
214 in_params[1].type = ACPI_TYPE_INTEGER;
215 in_params[1].integer.value = 1;
216 in_params[2].type = ACPI_TYPE_INTEGER;
217 in_params[2].integer.value = 3;
218 in_params[3].type = ACPI_TYPE_BUFFER;
219 in_params[3].buffer.length = 12;
220 in_params[3].buffer.pointer = (u8 *)capbuf;
221
222 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
223 if (ACPI_FAILURE(status))
224 return status;
225
226 if (!output.length)
227 return AE_NULL_OBJECT;
228
229 out_obj = output.pointer;
230 if (out_obj->type != ACPI_TYPE_BUFFER) {
231 printk(KERN_DEBUG "_OSC evaluation returned wrong type\n");
232 status = AE_TYPE;
233 goto out_kfree;
234 }
235 /* Need to ignore the bit0 in result code */
236 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
237 if (errors) {
238 if (errors & OSC_REQUEST_ERROR)
239 printk(KERN_DEBUG "_OSC request failed\n");
240 if (errors & OSC_INVALID_UUID_ERROR)
241 printk(KERN_DEBUG "_OSC invalid UUID\n");
242 if (errors & OSC_INVALID_REVISION_ERROR)
243 printk(KERN_DEBUG "_OSC invalid revision\n");
244 if (errors & OSC_CAPABILITIES_MASK_ERROR) {
245 if (capbuf[OSC_QUERY_TYPE] & OSC_QUERY_ENABLE)
246 goto out_success;
247 printk(KERN_DEBUG
248 "Firmware did not grant requested _OSC control\n");
249 status = AE_SUPPORT;
250 goto out_kfree;
251 }
252 status = AE_ERROR;
253 goto out_kfree;
254 }
255out_success:
256 *retval = *((u32 *)(out_obj->buffer.pointer + 8));
257 status = AE_OK;
258
259out_kfree:
260 kfree(output.pointer);
261 return status;
262}
263
264static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
265{
266 acpi_status status;
267 u32 support_set, result, capbuf[3];
268
269 /* do _OSC query for all possible controls */
270 support_set = root->osc_support_set | (flags & OSC_SUPPORT_MASKS);
271 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
272 capbuf[OSC_SUPPORT_TYPE] = support_set;
273 capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
274
275 status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
276 if (ACPI_SUCCESS(status)) {
277 root->osc_support_set = support_set;
278 root->osc_control_qry = result;
279 root->osc_queried = 1;
280 }
281 return status;
282}
283
284static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
285{
286 acpi_status status;
287 acpi_handle tmp;
288
289 status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
290 if (ACPI_FAILURE(status))
291 return status;
292 mutex_lock(&osc_lock);
293 status = acpi_pci_query_osc(root, flags);
294 mutex_unlock(&osc_lock);
295 return status;
296}
297
298static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
299{
300 struct acpi_pci_root *root;
301 list_for_each_entry(root, &acpi_pci_roots, node) {
302 if (root->device->handle == handle)
303 return root;
304 }
305 return NULL;
306}
307
308/**
309 * acpi_pci_osc_control_set - commit requested control to Firmware
310 * @handle: acpi_handle for the target ACPI object
311 * @flags: driver's requested control bits
312 *
313 * Attempt to take control from Firmware on requested control bits.
314 **/
315acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
316{
317 acpi_status status;
318 u32 control_req, result, capbuf[3];
319 acpi_handle tmp;
320 struct acpi_pci_root *root;
321
322 status = acpi_get_handle(handle, "_OSC", &tmp);
323 if (ACPI_FAILURE(status))
324 return status;
325
326 control_req = (flags & OSC_CONTROL_MASKS);
327 if (!control_req)
328 return AE_TYPE;
329
330 root = acpi_pci_find_root(handle);
331 if (!root)
332 return AE_NOT_EXIST;
333
334 mutex_lock(&osc_lock);
335 /* No need to evaluate _OSC if the control was already granted. */
336 if ((root->osc_control_set & control_req) == control_req)
337 goto out;
338
339 /* Need to query controls first before requesting them */
340 if (!root->osc_queried) {
341 status = acpi_pci_query_osc(root, root->osc_support_set);
342 if (ACPI_FAILURE(status))
343 goto out;
344 }
345 if ((root->osc_control_qry & control_req) != control_req) {
346 printk(KERN_DEBUG
347 "Firmware did not grant requested _OSC control\n");
348 status = AE_SUPPORT;
349 goto out;
350 }
351
352 capbuf[OSC_QUERY_TYPE] = 0;
353 capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
354 capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
355 status = acpi_pci_run_osc(handle, capbuf, &result);
356 if (ACPI_SUCCESS(status))
357 root->osc_control_set = result;
358out:
359 mutex_unlock(&osc_lock);
360 return status;
361}
362EXPORT_SYMBOL(acpi_pci_osc_control_set);
363
188static int __devinit acpi_pci_root_add(struct acpi_device *device) 364static int __devinit acpi_pci_root_add(struct acpi_device *device)
189{ 365{
190 int result = 0; 366 int result = 0;
@@ -217,7 +393,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
217 * PCI domains, so we indicate this in _OSC support capabilities. 393 * PCI domains, so we indicate this in _OSC support capabilities.
218 */ 394 */
219 flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; 395 flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
220 pci_acpi_osc_support(device->handle, flags); 396 acpi_pci_osc_support(root, flags);
221 397
222 /* 398 /*
223 * Segment 399 * Segment
@@ -353,7 +529,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
353 if (pci_msi_enabled()) 529 if (pci_msi_enabled())
354 flags |= OSC_MSI_SUPPORT; 530 flags |= OSC_MSI_SUPPORT;
355 if (flags != base_flags) 531 if (flags != base_flags)
356 pci_acpi_osc_support(device->handle, flags); 532 acpi_pci_osc_support(root, flags);
357 533
358 end: 534 end:
359 if (result) { 535 if (result) {
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2a4501dd2515..fdc864f9cf23 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -59,3 +59,13 @@ config HT_IRQ
59 This allows native hypertransport devices to use interrupts. 59 This allows native hypertransport devices to use interrupts.
60 60
61 If unsure say Y. 61 If unsure say Y.
62
63config PCI_IOV
64 bool "PCI IOV support"
65 depends on PCI
66 help
67 I/O Virtualization is a PCI feature supported by some devices
68 which allows them to create virtual devices which share their
69 physical resources.
70
71 If unsure, say N.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d07ce24f6a8..ba6af162fd39 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
29 29
30obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o 30obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
31 31
32obj-$(CONFIG_PCI_IOV) += iov.o
33
32# 34#
33# Some architectures use the generic PCI setup functions 35# Some architectures use the generic PCI setup functions
34# 36#
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 52b54f053be0..68f91a252595 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -133,7 +133,7 @@ int pci_bus_add_child(struct pci_bus *bus)
133 * 133 *
134 * Call hotplug for each new devices. 134 * Call hotplug for each new devices.
135 */ 135 */
136void pci_bus_add_devices(struct pci_bus *bus) 136void pci_bus_add_devices(const struct pci_bus *bus)
137{ 137{
138 struct pci_dev *dev; 138 struct pci_dev *dev;
139 struct pci_bus *child; 139 struct pci_bus *child;
@@ -184,8 +184,10 @@ void pci_enable_bridges(struct pci_bus *bus)
184 184
185 list_for_each_entry(dev, &bus->devices, bus_list) { 185 list_for_each_entry(dev, &bus->devices, bus_list) {
186 if (dev->subordinate) { 186 if (dev->subordinate) {
187 retval = pci_enable_device(dev); 187 if (atomic_read(&dev->enable_cnt) == 0) {
188 pci_set_master(dev); 188 retval = pci_enable_device(dev);
189 pci_set_master(dev);
190 }
189 pci_enable_bridges(dev->subordinate); 191 pci_enable_bridges(dev->subordinate);
190 } 192 }
191 } 193 }
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1c1141801060..fbc63d5e459f 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,9 +30,8 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/pci_hotplug.h> 32#include <linux/pci_hotplug.h>
33#include <linux/acpi.h>
33#include <linux/pci-acpi.h> 34#include <linux/pci-acpi.h>
34#include <acpi/acpi.h>
35#include <acpi/acpi_bus.h>
36 35
37#define MY_NAME "acpi_pcihp" 36#define MY_NAME "acpi_pcihp"
38 37
@@ -333,19 +332,14 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
333{ 332{
334 acpi_status status = AE_NOT_FOUND; 333 acpi_status status = AE_NOT_FOUND;
335 acpi_handle handle, phandle; 334 acpi_handle handle, phandle;
336 struct pci_bus *pbus = bus; 335 struct pci_bus *pbus;
337 struct pci_dev *pdev; 336
338 337 handle = NULL;
339 do { 338 for (pbus = bus; pbus; pbus = pbus->parent) {
340 pdev = pbus->self; 339 handle = acpi_pci_get_bridge_handle(pbus);
341 if (!pdev) { 340 if (handle)
342 handle = acpi_get_pci_rootbridge_handle(
343 pci_domain_nr(pbus), pbus->number);
344 break; 341 break;
345 } 342 }
346 handle = DEVICE_ACPI_HANDLE(&(pdev->dev));
347 pbus = pbus->parent;
348 } while (!handle);
349 343
350 /* 344 /*
351 * _HPP settings apply to all child buses, until another _HPP is 345 * _HPP settings apply to all child buses, until another _HPP is
@@ -378,12 +372,10 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
378 * 372 *
379 * Attempt to take hotplug control from firmware. 373 * Attempt to take hotplug control from firmware.
380 */ 374 */
381int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) 375int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
382{ 376{
383 acpi_status status; 377 acpi_status status;
384 acpi_handle chandle, handle; 378 acpi_handle chandle, handle;
385 struct pci_dev *pdev = dev;
386 struct pci_bus *parent;
387 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 379 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
388 380
389 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | 381 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
@@ -408,33 +400,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
408 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 400 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
409 dbg("Trying to get hotplug control for %s\n", 401 dbg("Trying to get hotplug control for %s\n",
410 (char *)string.pointer); 402 (char *)string.pointer);
411 status = pci_osc_control_set(handle, flags); 403 status = acpi_pci_osc_control_set(handle, flags);
412 if (ACPI_SUCCESS(status)) 404 if (ACPI_SUCCESS(status))
413 goto got_one; 405 goto got_one;
414 kfree(string.pointer); 406 kfree(string.pointer);
415 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; 407 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
416 } 408 }
417 409
418 pdev = dev; 410 handle = DEVICE_ACPI_HANDLE(&pdev->dev);
419 handle = DEVICE_ACPI_HANDLE(&dev->dev); 411 if (!handle) {
420 while (!handle) {
421 /* 412 /*
422 * This hotplug controller was not listed in the ACPI name 413 * This hotplug controller was not listed in the ACPI name
423 * space at all. Try to get acpi handle of parent pci bus. 414 * space at all. Try to get acpi handle of parent pci bus.
424 */ 415 */
425 if (!pdev || !pdev->bus->parent) 416 struct pci_bus *pbus;
426 break; 417 for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
427 parent = pdev->bus->parent; 418 handle = acpi_pci_get_bridge_handle(pbus);
428 dbg("Could not find %s in acpi namespace, trying parent\n", 419 if (handle)
429 pci_name(pdev)); 420 break;
430 if (!parent->self) 421 }
431 /* Parent must be a host bridge */
432 handle = acpi_get_pci_rootbridge_handle(
433 pci_domain_nr(parent),
434 parent->number);
435 else
436 handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
437 pdev = parent->self;
438 } 422 }
439 423
440 while (handle) { 424 while (handle) {
@@ -453,13 +437,13 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
453 } 437 }
454 438
455 dbg("Cannot get control of hotplug hardware for pci %s\n", 439 dbg("Cannot get control of hotplug hardware for pci %s\n",
456 pci_name(dev)); 440 pci_name(pdev));
457 441
458 kfree(string.pointer); 442 kfree(string.pointer);
459 return -ENODEV; 443 return -ENODEV;
460got_one: 444got_one:
461 dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev), 445 dbg("Gained control for hotplug HW for pci %s (%s)\n",
462 (char *)string.pointer); 446 pci_name(pdev), (char *)string.pointer);
463 kfree(string.pointer); 447 kfree(string.pointer);
464 return 0; 448 return 0;
465} 449}
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index d8649e127298..6151389fd903 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -1,395 +1,163 @@
1/* 1/* Works like the fakephp driver used to, except a little better.
2 * Fake PCI Hot Plug Controller Driver
3 * 2 *
4 * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> 3 * - It's possible to remove devices with subordinate busses.
5 * Copyright (C) 2003 IBM Corp. 4 * - New PCI devices that appear via any method, not just a fakephp triggered
6 * Copyright (C) 2003 Rolf Eike Beer <eike-kernel@sf-tec.de> 5 * rescan, will be noticed.
6 * - Devices that are removed via any method, not just a fakephp triggered
7 * removal, will also be noticed.
7 * 8 *
8 * Based on ideas and code from: 9 * Uses nothing from the pci-hotplug subsystem.
9 * Vladimir Kondratiev <vladimir.kondratiev@intel.com>
10 * Rolf Eike Beer <eike-kernel@sf-tec.de>
11 * 10 *
12 * All rights reserved.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation, version 2 of the License.
17 *
18 * Send feedback to <greg@kroah.com>
19 */ 11 */
20 12
21/*
22 *
23 * This driver will "emulate" removing PCI devices from the system. If
24 * the "power" file is written to with "0" then the specified PCI device
25 * will be completely removed from the kernel.
26 *
27 * WARNING, this does NOT turn off the power to the PCI device. This is
28 * a "logical" removal, not a physical or electrical removal.
29 *
30 * Use this module at your own risk, you have been warned!
31 *
32 * Enabling PCI devices is left as an exercise for the reader...
33 *
34 */
35#include <linux/kernel.h>
36#include <linux/module.h> 13#include <linux/module.h>
37#include <linux/pci.h> 14#include <linux/kernel.h>
38#include <linux/pci_hotplug.h> 15#include <linux/types.h>
16#include <linux/list.h>
17#include <linux/kobject.h>
18#include <linux/sysfs.h>
39#include <linux/init.h> 19#include <linux/init.h>
40#include <linux/string.h> 20#include <linux/pci.h>
41#include <linux/slab.h> 21#include <linux/device.h>
42#include <linux/workqueue.h>
43#include "../pci.h" 22#include "../pci.h"
44 23
45#if !defined(MODULE) 24struct legacy_slot {
46 #define MY_NAME "fakephp" 25 struct kobject kobj;
47#else 26 struct pci_dev *dev;
48 #define MY_NAME THIS_MODULE->name 27 struct list_head list;
49#endif
50
51#define dbg(format, arg...) \
52 do { \
53 if (debug) \
54 printk(KERN_DEBUG "%s: " format, \
55 MY_NAME , ## arg); \
56 } while (0)
57#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
58#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
59
60#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
61#define DRIVER_DESC "Fake PCI Hot Plug Controller Driver"
62
63struct dummy_slot {
64 struct list_head node;
65 struct hotplug_slot *slot;
66 struct pci_dev *dev;
67 struct work_struct remove_work;
68 unsigned long removed;
69}; 28};
70 29
71static int debug; 30static LIST_HEAD(legacy_list);
72static int dup_slots;
73static LIST_HEAD(slot_list);
74static struct workqueue_struct *dummyphp_wq;
75
76static void pci_rescan_worker(struct work_struct *work);
77static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
78
79static int enable_slot (struct hotplug_slot *slot);
80static int disable_slot (struct hotplug_slot *slot);
81 31
82static struct hotplug_slot_ops dummy_hotplug_slot_ops = { 32static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr,
83 .owner = THIS_MODULE, 33 char *buf)
84 .enable_slot = enable_slot,
85 .disable_slot = disable_slot,
86};
87
88static void dummy_release(struct hotplug_slot *slot)
89{ 34{
90 struct dummy_slot *dslot = slot->private; 35 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
91 36 strcpy(buf, "1\n");
92 list_del(&dslot->node); 37 return 2;
93 kfree(dslot->slot->info);
94 kfree(dslot->slot);
95 pci_dev_put(dslot->dev);
96 kfree(dslot);
97} 38}
98 39
99#define SLOT_NAME_SIZE 8 40static void remove_callback(void *data)
100
101static int add_slot(struct pci_dev *dev)
102{ 41{
103 struct dummy_slot *dslot; 42 pci_remove_bus_device((struct pci_dev *)data);
104 struct hotplug_slot *slot;
105 char name[SLOT_NAME_SIZE];
106 int retval = -ENOMEM;
107 static int count = 1;
108
109 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
110 if (!slot)
111 goto error;
112
113 slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
114 if (!slot->info)
115 goto error_slot;
116
117 slot->info->power_status = 1;
118 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
119 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
120
121 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
122 if (!dslot)
123 goto error_info;
124
125 if (dup_slots)
126 snprintf(name, SLOT_NAME_SIZE, "fake");
127 else
128 snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
129 dbg("slot->name = %s\n", name);
130 slot->ops = &dummy_hotplug_slot_ops;
131 slot->release = &dummy_release;
132 slot->private = dslot;
133
134 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
135 if (retval) {
136 err("pci_hp_register failed with error %d\n", retval);
137 goto error_dslot;
138 }
139
140 dbg("slot->name = %s\n", hotplug_slot_name(slot));
141 dslot->slot = slot;
142 dslot->dev = pci_dev_get(dev);
143 list_add (&dslot->node, &slot_list);
144 return retval;
145
146error_dslot:
147 kfree(dslot);
148error_info:
149 kfree(slot->info);
150error_slot:
151 kfree(slot);
152error:
153 return retval;
154} 43}
155 44
156static int __init pci_scan_buses(void) 45static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr,
46 const char *buf, size_t len)
157{ 47{
158 struct pci_dev *dev = NULL; 48 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
159 int lastslot = 0; 49 unsigned long val;
160 50
161 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 51 if (strict_strtoul(buf, 0, &val) < 0)
162 if (PCI_FUNC(dev->devfn) > 0 && 52 return -EINVAL;
163 lastslot == PCI_SLOT(dev->devfn))
164 continue;
165 lastslot = PCI_SLOT(dev->devfn);
166 add_slot(dev);
167 }
168 53
169 return 0; 54 if (val)
55 pci_rescan_bus(slot->dev->bus);
56 else
57 sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback,
58 slot->dev, THIS_MODULE);
59 return len;
170} 60}
171 61
172static void remove_slot(struct dummy_slot *dslot) 62static struct attribute *legacy_attrs[] = {
173{ 63 &(struct attribute){ .name = "power", .mode = 0644 },
174 int retval; 64 NULL,
175 65};
176 dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
177 retval = pci_hp_deregister(dslot->slot);
178 if (retval)
179 err("Problem unregistering a slot %s\n",
180 hotplug_slot_name(dslot->slot));
181}
182 66
183/* called from the single-threaded workqueue handler to remove a slot */ 67static void legacy_release(struct kobject *kobj)
184static void remove_slot_worker(struct work_struct *work)
185{ 68{
186 struct dummy_slot *dslot = 69 struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
187 container_of(work, struct dummy_slot, remove_work);
188 remove_slot(dslot);
189}
190 70
191/** 71 pci_dev_put(slot->dev);
192 * pci_rescan_slot - Rescan slot 72 kfree(slot);
193 * @temp: Device template. Should be set: bus and devfn.
194 *
195 * Tries hard not to re-enable already existing devices;
196 * also handles scanning of subfunctions.
197 */
198static int pci_rescan_slot(struct pci_dev *temp)
199{
200 struct pci_bus *bus = temp->bus;
201 struct pci_dev *dev;
202 int func;
203 u8 hdr_type;
204 int count = 0;
205
206 if (!pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type)) {
207 temp->hdr_type = hdr_type & 0x7f;
208 if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
209 pci_dev_put(dev);
210 else {
211 dev = pci_scan_single_device(bus, temp->devfn);
212 if (dev) {
213 dbg("New device on %s function %x:%x\n",
214 bus->name, temp->devfn >> 3,
215 temp->devfn & 7);
216 count++;
217 }
218 }
219 /* multifunction device? */
220 if (!(hdr_type & 0x80))
221 return count;
222
223 /* continue scanning for other functions */
224 for (func = 1, temp->devfn++; func < 8; func++, temp->devfn++) {
225 if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
226 continue;
227 temp->hdr_type = hdr_type & 0x7f;
228
229 if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
230 pci_dev_put(dev);
231 else {
232 dev = pci_scan_single_device(bus, temp->devfn);
233 if (dev) {
234 dbg("New device on %s function %x:%x\n",
235 bus->name, temp->devfn >> 3,
236 temp->devfn & 7);
237 count++;
238 }
239 }
240 }
241 }
242
243 return count;
244} 73}
245 74
75static struct kobj_type legacy_ktype = {
76 .sysfs_ops = &(struct sysfs_ops){
77 .store = legacy_store, .show = legacy_show
78 },
79 .release = &legacy_release,
80 .default_attrs = legacy_attrs,
81};
246 82
247/** 83static int legacy_add_slot(struct pci_dev *pdev)
248 * pci_rescan_bus - Rescan PCI bus
249 * @bus: the PCI bus to rescan
250 *
251 * Call pci_rescan_slot for each possible function of the bus.
252 */
253static void pci_rescan_bus(const struct pci_bus *bus)
254{ 84{
255 unsigned int devfn; 85 struct legacy_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
256 struct pci_dev *dev;
257 int retval;
258 int found = 0;
259 dev = alloc_pci_dev();
260 if (!dev)
261 return;
262 86
263 dev->bus = (struct pci_bus*)bus; 87 if (!slot)
264 dev->sysdata = bus->sysdata; 88 return -ENOMEM;
265 for (devfn = 0; devfn < 0x100; devfn += 8) {
266 dev->devfn = devfn;
267 found += pci_rescan_slot(dev);
268 }
269
270 if (found) {
271 pci_bus_assign_resources(bus);
272 list_for_each_entry(dev, &bus->devices, bus_list) {
273 /* Skip already-added devices */
274 if (dev->is_added)
275 continue;
276 retval = pci_bus_add_device(dev);
277 if (retval)
278 dev_err(&dev->dev,
279 "Error adding device, continuing\n");
280 else
281 add_slot(dev);
282 }
283 pci_bus_add_devices(bus);
284 }
285 kfree(dev);
286}
287 89
288/* recursively scan all buses */ 90 if (kobject_init_and_add(&slot->kobj, &legacy_ktype,
289static void pci_rescan_buses(const struct list_head *list) 91 &pci_slots_kset->kobj, "%s",
290{ 92 dev_name(&pdev->dev))) {
291 const struct list_head *l; 93 dev_warn(&pdev->dev, "Failed to created legacy fake slot\n");
292 list_for_each(l,list) { 94 return -EINVAL;
293 const struct pci_bus *b = pci_bus_b(l);
294 pci_rescan_bus(b);
295 pci_rescan_buses(&b->children);
296 } 95 }
297} 96 slot->dev = pci_dev_get(pdev);
298 97
299/* initiate rescan of all pci buses */ 98 list_add(&slot->list, &legacy_list);
300static inline void pci_rescan(void) {
301 pci_rescan_buses(&pci_root_buses);
302}
303
304/* called from the single-threaded workqueue handler to rescan all pci buses */
305static void pci_rescan_worker(struct work_struct *work)
306{
307 pci_rescan();
308}
309 99
310static int enable_slot(struct hotplug_slot *hotplug_slot)
311{
312 /* mis-use enable_slot for rescanning of the pci bus */
313 cancel_work_sync(&pci_rescan_work);
314 queue_work(dummyphp_wq, &pci_rescan_work);
315 return 0; 100 return 0;
316} 101}
317 102
318static int disable_slot(struct hotplug_slot *slot) 103static int legacy_notify(struct notifier_block *nb,
104 unsigned long action, void *data)
319{ 105{
320 struct dummy_slot *dslot; 106 struct pci_dev *pdev = to_pci_dev(data);
321 struct pci_dev *dev;
322 int func;
323
324 if (!slot)
325 return -ENODEV;
326 dslot = slot->private;
327
328 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
329 107
330 for (func = 7; func >= 0; func--) { 108 if (action == BUS_NOTIFY_ADD_DEVICE) {
331 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); 109 legacy_add_slot(pdev);
332 if (!dev) 110 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
333 continue; 111 struct legacy_slot *slot;
334 112
335 if (test_and_set_bit(0, &dslot->removed)) { 113 list_for_each_entry(slot, &legacy_list, list)
336 dbg("Slot already scheduled for removal\n"); 114 if (slot->dev == pdev)
337 pci_dev_put(dev); 115 goto found;
338 return -ENODEV;
339 }
340 116
341 /* remove the device from the pci core */ 117 dev_warn(&pdev->dev, "Missing legacy fake slot?");
342 pci_remove_bus_device(dev); 118 return -ENODEV;
343 119found:
344 /* queue work item to blow away this sysfs entry and other 120 kobject_del(&slot->kobj);
345 * parts. 121 list_del(&slot->list);
346 */ 122 kobject_put(&slot->kobj);
347 INIT_WORK(&dslot->remove_work, remove_slot_worker);
348 queue_work(dummyphp_wq, &dslot->remove_work);
349
350 pci_dev_put(dev);
351 } 123 }
124
352 return 0; 125 return 0;
353} 126}
354 127
355static void cleanup_slots (void) 128static struct notifier_block legacy_notifier = {
356{ 129 .notifier_call = legacy_notify
357 struct list_head *tmp; 130};
358 struct list_head *next;
359 struct dummy_slot *dslot;
360
361 destroy_workqueue(dummyphp_wq);
362 list_for_each_safe (tmp, next, &slot_list) {
363 dslot = list_entry (tmp, struct dummy_slot, node);
364 remove_slot(dslot);
365 }
366
367}
368 131
369static int __init dummyphp_init(void) 132static int __init init_legacy(void)
370{ 133{
371 info(DRIVER_DESC "\n"); 134 struct pci_dev *pdev = NULL;
372 135
373 dummyphp_wq = create_singlethread_workqueue(MY_NAME); 136 /* Add existing devices */
374 if (!dummyphp_wq) 137 while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
375 return -ENOMEM; 138 legacy_add_slot(pdev);
376 139
377 return pci_scan_buses(); 140 /* Be alerted of any new ones */
141 bus_register_notifier(&pci_bus_type, &legacy_notifier);
142 return 0;
378} 143}
144module_init(init_legacy);
379 145
380 146static void __exit remove_legacy(void)
381static void __exit dummyphp_exit(void)
382{ 147{
383 cleanup_slots(); 148 struct legacy_slot *slot, *tmp;
149
150 bus_unregister_notifier(&pci_bus_type, &legacy_notifier);
151
152 list_for_each_entry_safe(slot, tmp, &legacy_list, list) {
153 list_del(&slot->list);
154 kobject_del(&slot->kobj);
155 kobject_put(&slot->kobj);
156 }
384} 157}
158module_exit(remove_legacy);
385 159
386module_init(dummyphp_init);
387module_exit(dummyphp_exit);
388 160
389MODULE_AUTHOR(DRIVER_AUTHOR); 161MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>");
390MODULE_DESCRIPTION(DRIVER_DESC); 162MODULE_DESCRIPTION("Legacy version of the fakephp interface");
391MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
392module_param(debug, bool, S_IRUGO | S_IWUSR);
393MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
394module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
395MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 39ae37589fda..0a368547e633 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,10 +46,10 @@ extern int pciehp_force;
46extern struct workqueue_struct *pciehp_wq; 46extern struct workqueue_struct *pciehp_wq;
47 47
48#define dbg(format, arg...) \ 48#define dbg(format, arg...) \
49 do { \ 49do { \
50 if (pciehp_debug) \ 50 if (pciehp_debug) \
51 printk("%s: " format, MY_NAME , ## arg); \ 51 printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
52 } while (0) 52} while (0)
53#define err(format, arg...) \ 53#define err(format, arg...) \
54 printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 54 printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
55#define info(format, arg...) \ 55#define info(format, arg...) \
@@ -60,7 +60,7 @@ extern struct workqueue_struct *pciehp_wq;
60#define ctrl_dbg(ctrl, format, arg...) \ 60#define ctrl_dbg(ctrl, format, arg...) \
61 do { \ 61 do { \
62 if (pciehp_debug) \ 62 if (pciehp_debug) \
63 dev_printk(, &ctrl->pcie->device, \ 63 dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
64 format, ## arg); \ 64 format, ## arg); \
65 } while (0) 65 } while (0)
66#define ctrl_err(ctrl, format, arg...) \ 66#define ctrl_err(ctrl, format, arg...) \
@@ -108,10 +108,11 @@ struct controller {
108 u32 slot_cap; 108 u32 slot_cap;
109 u8 cap_base; 109 u8 cap_base;
110 struct timer_list poll_timer; 110 struct timer_list poll_timer;
111 int cmd_busy; 111 unsigned int cmd_busy:1;
112 unsigned int no_cmd_complete:1; 112 unsigned int no_cmd_complete:1;
113 unsigned int link_active_reporting:1; 113 unsigned int link_active_reporting:1;
114 unsigned int notification_enabled:1; 114 unsigned int notification_enabled:1;
115 unsigned int power_fault_detected;
115}; 116};
116 117
117#define INT_BUTTON_IGNORE 0 118#define INT_BUTTON_IGNORE 0
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 438d795f9fe3..96048010e7d9 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -67,37 +67,27 @@ static int __init parse_detect_mode(void)
67 return PCIEHP_DETECT_DEFAULT; 67 return PCIEHP_DETECT_DEFAULT;
68} 68}
69 69
70static struct pcie_port_service_id __initdata port_pci_ids[] = {
71 {
72 .vendor = PCI_ANY_ID,
73 .device = PCI_ANY_ID,
74 .port_type = PCIE_ANY_PORT,
75 .service_type = PCIE_PORT_SERVICE_HP,
76 .driver_data = 0,
77 }, { /* end: all zeroes */ }
78};
79
80static int __initdata dup_slot_id; 70static int __initdata dup_slot_id;
81static int __initdata acpi_slot_detected; 71static int __initdata acpi_slot_detected;
82static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); 72static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
83 73
84/* Dummy driver for dumplicate name detection */ 74/* Dummy driver for dumplicate name detection */
85static int __init dummy_probe(struct pcie_device *dev, 75static int __init dummy_probe(struct pcie_device *dev)
86 const struct pcie_port_service_id *id)
87{ 76{
88 int pos; 77 int pos;
89 u32 slot_cap; 78 u32 slot_cap;
90 struct slot *slot, *tmp; 79 struct slot *slot, *tmp;
91 struct pci_dev *pdev = dev->port; 80 struct pci_dev *pdev = dev->port;
92 struct pci_bus *pbus = pdev->subordinate; 81 struct pci_bus *pbus = pdev->subordinate;
93 if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
94 return -ENOMEM;
95 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ 82 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
96 if (pciehp_get_hp_hw_control_from_firmware(pdev)) 83 if (pciehp_get_hp_hw_control_from_firmware(pdev))
97 return -ENODEV; 84 return -ENODEV;
98 if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP))) 85 if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
99 return -ENODEV; 86 return -ENODEV;
100 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); 87 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
88 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
89 if (!slot)
90 return -ENOMEM;
101 slot->number = slot_cap >> 19; 91 slot->number = slot_cap >> 19;
102 list_for_each_entry(tmp, &dummy_slots, slot_list) { 92 list_for_each_entry(tmp, &dummy_slots, slot_list) {
103 if (tmp->number == slot->number) 93 if (tmp->number == slot->number)
@@ -111,7 +101,8 @@ static int __init dummy_probe(struct pcie_device *dev,
111 101
112static struct pcie_port_service_driver __initdata dummy_driver = { 102static struct pcie_port_service_driver __initdata dummy_driver = {
113 .name = "pciehp_dummy", 103 .name = "pciehp_dummy",
114 .id_table = port_pci_ids, 104 .port_type = PCIE_ANY_PORT,
105 .service = PCIE_PORT_SERVICE_HP,
115 .probe = dummy_probe, 106 .probe = dummy_probe,
116}; 107};
117 108
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 681e3912b821..fb254b2454de 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -401,7 +401,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
401 return 0; 401 return 0;
402} 402}
403 403
404static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_id *id) 404static int pciehp_probe(struct pcie_device *dev)
405{ 405{
406 int rc; 406 int rc;
407 struct controller *ctrl; 407 struct controller *ctrl;
@@ -475,7 +475,7 @@ static void pciehp_remove (struct pcie_device *dev)
475} 475}
476 476
477#ifdef CONFIG_PM 477#ifdef CONFIG_PM
478static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) 478static int pciehp_suspend (struct pcie_device *dev)
479{ 479{
480 dev_info(&dev->device, "%s ENTRY\n", __func__); 480 dev_info(&dev->device, "%s ENTRY\n", __func__);
481 return 0; 481 return 0;
@@ -503,20 +503,12 @@ static int pciehp_resume (struct pcie_device *dev)
503 } 503 }
504 return 0; 504 return 0;
505} 505}
506#endif 506#endif /* PM */
507
508static struct pcie_port_service_id port_pci_ids[] = { {
509 .vendor = PCI_ANY_ID,
510 .device = PCI_ANY_ID,
511 .port_type = PCIE_ANY_PORT,
512 .service_type = PCIE_PORT_SERVICE_HP,
513 .driver_data = 0,
514 }, { /* end: all zeroes */ }
515};
516 507
517static struct pcie_port_service_driver hpdriver_portdrv = { 508static struct pcie_port_service_driver hpdriver_portdrv = {
518 .name = PCIE_MODULE_NAME, 509 .name = PCIE_MODULE_NAME,
519 .id_table = &port_pci_ids[0], 510 .port_type = PCIE_ANY_PORT,
511 .service = PCIE_PORT_SERVICE_HP,
520 512
521 .probe = pciehp_probe, 513 .probe = pciehp_probe,
522 .remove = pciehp_remove, 514 .remove = pciehp_remove,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7a16c6897bb9..07bd32151146 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -548,23 +548,21 @@ static int hpc_power_on_slot(struct slot * slot)
548 548
549 slot_cmd = POWER_ON; 549 slot_cmd = POWER_ON;
550 cmd_mask = PCI_EXP_SLTCTL_PCC; 550 cmd_mask = PCI_EXP_SLTCTL_PCC;
551 /* Enable detection that we turned off at slot power-off time */
552 if (!pciehp_poll_mode) { 551 if (!pciehp_poll_mode) {
553 slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 552 /* Enable power fault detection turned off at power off time */
554 PCI_EXP_SLTCTL_PDCE); 553 slot_cmd |= PCI_EXP_SLTCTL_PFDE;
555 cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 554 cmd_mask |= PCI_EXP_SLTCTL_PFDE;
556 PCI_EXP_SLTCTL_PDCE);
557 } 555 }
558 556
559 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 557 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
560
561 if (retval) { 558 if (retval) {
562 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); 559 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
563 return -1; 560 return retval;
564 } 561 }
565 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 562 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
566 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 563 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
567 564
565 ctrl->power_fault_detected = 0;
568 return retval; 566 return retval;
569} 567}
570 568
@@ -621,18 +619,10 @@ static int hpc_power_off_slot(struct slot * slot)
621 619
622 slot_cmd = POWER_OFF; 620 slot_cmd = POWER_OFF;
623 cmd_mask = PCI_EXP_SLTCTL_PCC; 621 cmd_mask = PCI_EXP_SLTCTL_PCC;
624 /*
625 * If we get MRL or presence detect interrupts now, the isr
626 * will notice the sticky power-fault bit too and issue power
627 * indicator change commands. This will lead to an endless loop
628 * of command completions, since the power-fault bit remains on
629 * till the slot is powered on again.
630 */
631 if (!pciehp_poll_mode) { 622 if (!pciehp_poll_mode) {
632 slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 623 /* Disable power fault detection */
633 PCI_EXP_SLTCTL_PDCE); 624 slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
634 cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 625 cmd_mask |= PCI_EXP_SLTCTL_PFDE;
635 PCI_EXP_SLTCTL_PDCE);
636 } 626 }
637 627
638 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 628 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -672,10 +662,11 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
672 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | 662 detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
673 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 663 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
674 PCI_EXP_SLTSTA_CC); 664 PCI_EXP_SLTSTA_CC);
665 detected &= ~intr_loc;
675 intr_loc |= detected; 666 intr_loc |= detected;
676 if (!intr_loc) 667 if (!intr_loc)
677 return IRQ_NONE; 668 return IRQ_NONE;
678 if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) { 669 if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
679 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", 670 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
680 __func__); 671 __func__);
681 return IRQ_NONE; 672 return IRQ_NONE;
@@ -709,9 +700,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
709 pciehp_handle_presence_change(p_slot); 700 pciehp_handle_presence_change(p_slot);
710 701
711 /* Check Power Fault Detected */ 702 /* Check Power Fault Detected */
712 if (intr_loc & PCI_EXP_SLTSTA_PFD) 703 if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
704 ctrl->power_fault_detected = 1;
713 pciehp_handle_power_fault(p_slot); 705 pciehp_handle_power_fault(p_slot);
714 706 }
715 return IRQ_HANDLED; 707 return IRQ_HANDLED;
716} 708}
717 709
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 6aba0b6cf2e0..974e924ca96d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -48,10 +48,10 @@ extern int shpchp_debug;
48extern struct workqueue_struct *shpchp_wq; 48extern struct workqueue_struct *shpchp_wq;
49 49
50#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
51 do { \ 51do { \
52 if (shpchp_debug) \ 52 if (shpchp_debug) \
53 printk("%s: " format, MY_NAME , ## arg); \ 53 printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
54 } while (0) 54} while (0)
55#define err(format, arg...) \ 55#define err(format, arg...) \
56 printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 56 printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
57#define info(format, arg...) \ 57#define info(format, arg...) \
@@ -62,7 +62,7 @@ extern struct workqueue_struct *shpchp_wq;
62#define ctrl_dbg(ctrl, format, arg...) \ 62#define ctrl_dbg(ctrl, format, arg...) \
63 do { \ 63 do { \
64 if (shpchp_debug) \ 64 if (shpchp_debug) \
65 dev_printk(, &ctrl->pci_dev->dev, \ 65 dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
66 format, ## arg); \ 66 format, ## arg); \
67 } while (0) 67 } while (0)
68#define ctrl_err(ctrl, format, arg...) \ 68#define ctrl_err(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 138f161becc0..aa315e52529b 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -137,7 +137,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
137 busnr)) 137 busnr))
138 break; 138 break;
139 } 139 }
140 if (busnr >= end) { 140 if (busnr > end) {
141 ctrl_err(ctrl, 141 ctrl_err(ctrl,
142 "No free bus for hot-added bridge\n"); 142 "No free bus for hot-added bridge\n");
143 pci_dev_put(dev); 143 pci_dev_put(dev);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 49402c399232..9dbd5066acaf 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1782,7 +1782,7 @@ static inline void iommu_prepare_isa(void)
1782 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 1782 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1783 1783
1784 if (ret) 1784 if (ret)
1785 printk("IOMMU: Failed to create 0-64M identity map, " 1785 printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
1786 "floppy might not work\n"); 1786 "floppy might not work\n");
1787 1787
1788} 1788}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
new file mode 100644
index 000000000000..7227efc760db
--- /dev/null
+++ b/drivers/pci/iov.c
@@ -0,0 +1,680 @@
1/*
2 * drivers/pci/iov.c
3 *
4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
5 *
6 * PCI Express I/O Virtualization (IOV) support.
7 * Single Root IOV 1.0
8 */
9
10#include <linux/pci.h>
11#include <linux/mutex.h>
12#include <linux/string.h>
13#include <linux/delay.h>
14#include "pci.h"
15
16#define VIRTFN_ID_LEN 16
17
18static inline u8 virtfn_bus(struct pci_dev *dev, int id)
19{
20 return dev->bus->number + ((dev->devfn + dev->sriov->offset +
21 dev->sriov->stride * id) >> 8);
22}
23
24static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
25{
26 return (dev->devfn + dev->sriov->offset +
27 dev->sriov->stride * id) & 0xff;
28}
29
30static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
31{
32 int rc;
33 struct pci_bus *child;
34
35 if (bus->number == busnr)
36 return bus;
37
38 child = pci_find_bus(pci_domain_nr(bus), busnr);
39 if (child)
40 return child;
41
42 child = pci_add_new_bus(bus, NULL, busnr);
43 if (!child)
44 return NULL;
45
46 child->subordinate = busnr;
47 child->dev.parent = bus->bridge;
48 rc = pci_bus_add_child(child);
49 if (rc) {
50 pci_remove_bus(child);
51 return NULL;
52 }
53
54 return child;
55}
56
57static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
58{
59 struct pci_bus *child;
60
61 if (bus->number == busnr)
62 return;
63
64 child = pci_find_bus(pci_domain_nr(bus), busnr);
65 BUG_ON(!child);
66
67 if (list_empty(&child->devices))
68 pci_remove_bus(child);
69}
70
71static int virtfn_add(struct pci_dev *dev, int id, int reset)
72{
73 int i;
74 int rc;
75 u64 size;
76 char buf[VIRTFN_ID_LEN];
77 struct pci_dev *virtfn;
78 struct resource *res;
79 struct pci_sriov *iov = dev->sriov;
80
81 virtfn = alloc_pci_dev();
82 if (!virtfn)
83 return -ENOMEM;
84
85 mutex_lock(&iov->dev->sriov->lock);
86 virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
87 if (!virtfn->bus) {
88 kfree(virtfn);
89 mutex_unlock(&iov->dev->sriov->lock);
90 return -ENOMEM;
91 }
92 virtfn->devfn = virtfn_devfn(dev, id);
93 virtfn->vendor = dev->vendor;
94 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
95 pci_setup_device(virtfn);
96 virtfn->dev.parent = dev->dev.parent;
97
98 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
99 res = dev->resource + PCI_IOV_RESOURCES + i;
100 if (!res->parent)
101 continue;
102 virtfn->resource[i].name = pci_name(virtfn);
103 virtfn->resource[i].flags = res->flags;
104 size = resource_size(res);
105 do_div(size, iov->total);
106 virtfn->resource[i].start = res->start + size * id;
107 virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
108 rc = request_resource(res, &virtfn->resource[i]);
109 BUG_ON(rc);
110 }
111
112 if (reset)
113 pci_execute_reset_function(virtfn);
114
115 pci_device_add(virtfn, virtfn->bus);
116 mutex_unlock(&iov->dev->sriov->lock);
117
118 virtfn->physfn = pci_dev_get(dev);
119 virtfn->is_virtfn = 1;
120
121 rc = pci_bus_add_device(virtfn);
122 if (rc)
123 goto failed1;
124 sprintf(buf, "virtfn%u", id);
125 rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
126 if (rc)
127 goto failed1;
128 rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
129 if (rc)
130 goto failed2;
131
132 kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
133
134 return 0;
135
136failed2:
137 sysfs_remove_link(&dev->dev.kobj, buf);
138failed1:
139 pci_dev_put(dev);
140 mutex_lock(&iov->dev->sriov->lock);
141 pci_remove_bus_device(virtfn);
142 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
143 mutex_unlock(&iov->dev->sriov->lock);
144
145 return rc;
146}
147
148static void virtfn_remove(struct pci_dev *dev, int id, int reset)
149{
150 char buf[VIRTFN_ID_LEN];
151 struct pci_bus *bus;
152 struct pci_dev *virtfn;
153 struct pci_sriov *iov = dev->sriov;
154
155 bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
156 if (!bus)
157 return;
158
159 virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
160 if (!virtfn)
161 return;
162
163 pci_dev_put(virtfn);
164
165 if (reset) {
166 device_release_driver(&virtfn->dev);
167 pci_execute_reset_function(virtfn);
168 }
169
170 sprintf(buf, "virtfn%u", id);
171 sysfs_remove_link(&dev->dev.kobj, buf);
172 sysfs_remove_link(&virtfn->dev.kobj, "physfn");
173
174 mutex_lock(&iov->dev->sriov->lock);
175 pci_remove_bus_device(virtfn);
176 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
177 mutex_unlock(&iov->dev->sriov->lock);
178
179 pci_dev_put(dev);
180}
181
182static int sriov_migration(struct pci_dev *dev)
183{
184 u16 status;
185 struct pci_sriov *iov = dev->sriov;
186
187 if (!iov->nr_virtfn)
188 return 0;
189
190 if (!(iov->cap & PCI_SRIOV_CAP_VFM))
191 return 0;
192
193 pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
194 if (!(status & PCI_SRIOV_STATUS_VFM))
195 return 0;
196
197 schedule_work(&iov->mtask);
198
199 return 1;
200}
201
202static void sriov_migration_task(struct work_struct *work)
203{
204 int i;
205 u8 state;
206 u16 status;
207 struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
208
209 for (i = iov->initial; i < iov->nr_virtfn; i++) {
210 state = readb(iov->mstate + i);
211 if (state == PCI_SRIOV_VFM_MI) {
212 writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
213 state = readb(iov->mstate + i);
214 if (state == PCI_SRIOV_VFM_AV)
215 virtfn_add(iov->self, i, 1);
216 } else if (state == PCI_SRIOV_VFM_MO) {
217 virtfn_remove(iov->self, i, 1);
218 writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
219 state = readb(iov->mstate + i);
220 if (state == PCI_SRIOV_VFM_AV)
221 virtfn_add(iov->self, i, 0);
222 }
223 }
224
225 pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
226 status &= ~PCI_SRIOV_STATUS_VFM;
227 pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
228}
229
230static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
231{
232 int bir;
233 u32 table;
234 resource_size_t pa;
235 struct pci_sriov *iov = dev->sriov;
236
237 if (nr_virtfn <= iov->initial)
238 return 0;
239
240 pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
241 bir = PCI_SRIOV_VFM_BIR(table);
242 if (bir > PCI_STD_RESOURCE_END)
243 return -EIO;
244
245 table = PCI_SRIOV_VFM_OFFSET(table);
246 if (table + nr_virtfn > pci_resource_len(dev, bir))
247 return -EIO;
248
249 pa = pci_resource_start(dev, bir) + table;
250 iov->mstate = ioremap(pa, nr_virtfn);
251 if (!iov->mstate)
252 return -ENOMEM;
253
254 INIT_WORK(&iov->mtask, sriov_migration_task);
255
256 iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
257 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
258
259 return 0;
260}
261
262static void sriov_disable_migration(struct pci_dev *dev)
263{
264 struct pci_sriov *iov = dev->sriov;
265
266 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
267 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
268
269 cancel_work_sync(&iov->mtask);
270 iounmap(iov->mstate);
271}
272
273static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
274{
275 int rc;
276 int i, j;
277 int nres;
278 u16 offset, stride, initial;
279 struct resource *res;
280 struct pci_dev *pdev;
281 struct pci_sriov *iov = dev->sriov;
282
283 if (!nr_virtfn)
284 return 0;
285
286 if (iov->nr_virtfn)
287 return -EINVAL;
288
289 pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
290 if (initial > iov->total ||
291 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
292 return -EIO;
293
294 if (nr_virtfn < 0 || nr_virtfn > iov->total ||
295 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
296 return -EINVAL;
297
298 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
299 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
300 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
301 if (!offset || (nr_virtfn > 1 && !stride))
302 return -EIO;
303
304 nres = 0;
305 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
306 res = dev->resource + PCI_IOV_RESOURCES + i;
307 if (res->parent)
308 nres++;
309 }
310 if (nres != iov->nres) {
311 dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
312 return -ENOMEM;
313 }
314
315 iov->offset = offset;
316 iov->stride = stride;
317
318 if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
319 dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
320 return -ENOMEM;
321 }
322
323 if (iov->link != dev->devfn) {
324 pdev = pci_get_slot(dev->bus, iov->link);
325 if (!pdev)
326 return -ENODEV;
327
328 pci_dev_put(pdev);
329
330 if (!pdev->is_physfn)
331 return -ENODEV;
332
333 rc = sysfs_create_link(&dev->dev.kobj,
334 &pdev->dev.kobj, "dep_link");
335 if (rc)
336 return rc;
337 }
338
339 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
340 pci_block_user_cfg_access(dev);
341 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
342 msleep(100);
343 pci_unblock_user_cfg_access(dev);
344
345 iov->initial = initial;
346 if (nr_virtfn < initial)
347 initial = nr_virtfn;
348
349 for (i = 0; i < initial; i++) {
350 rc = virtfn_add(dev, i, 0);
351 if (rc)
352 goto failed;
353 }
354
355 if (iov->cap & PCI_SRIOV_CAP_VFM) {
356 rc = sriov_enable_migration(dev, nr_virtfn);
357 if (rc)
358 goto failed;
359 }
360
361 kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
362 iov->nr_virtfn = nr_virtfn;
363
364 return 0;
365
366failed:
367 for (j = 0; j < i; j++)
368 virtfn_remove(dev, j, 0);
369
370 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
371 pci_block_user_cfg_access(dev);
372 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
373 ssleep(1);
374 pci_unblock_user_cfg_access(dev);
375
376 if (iov->link != dev->devfn)
377 sysfs_remove_link(&dev->dev.kobj, "dep_link");
378
379 return rc;
380}
381
382static void sriov_disable(struct pci_dev *dev)
383{
384 int i;
385 struct pci_sriov *iov = dev->sriov;
386
387 if (!iov->nr_virtfn)
388 return;
389
390 if (iov->cap & PCI_SRIOV_CAP_VFM)
391 sriov_disable_migration(dev);
392
393 for (i = 0; i < iov->nr_virtfn; i++)
394 virtfn_remove(dev, i, 0);
395
396 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
397 pci_block_user_cfg_access(dev);
398 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
399 ssleep(1);
400 pci_unblock_user_cfg_access(dev);
401
402 if (iov->link != dev->devfn)
403 sysfs_remove_link(&dev->dev.kobj, "dep_link");
404
405 iov->nr_virtfn = 0;
406}
407
408static int sriov_init(struct pci_dev *dev, int pos)
409{
410 int i;
411 int rc;
412 int nres;
413 u32 pgsz;
414 u16 ctrl, total, offset, stride;
415 struct pci_sriov *iov;
416 struct resource *res;
417 struct pci_dev *pdev;
418
419 if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
420 dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
421 return -ENODEV;
422
423 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
424 if (ctrl & PCI_SRIOV_CTRL_VFE) {
425 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
426 ssleep(1);
427 }
428
429 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
430 if (!total)
431 return 0;
432
433 ctrl = 0;
434 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
435 if (pdev->is_physfn)
436 goto found;
437
438 pdev = NULL;
439 if (pci_ari_enabled(dev->bus))
440 ctrl |= PCI_SRIOV_CTRL_ARI;
441
442found:
443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
444 pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
446 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
447 if (!offset || (total > 1 && !stride))
448 return -EIO;
449
450 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
451 i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
452 pgsz &= ~((1 << i) - 1);
453 if (!pgsz)
454 return -EIO;
455
456 pgsz &= ~(pgsz - 1);
457 pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
458
459 nres = 0;
460 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
461 res = dev->resource + PCI_IOV_RESOURCES + i;
462 i += __pci_read_base(dev, pci_bar_unknown, res,
463 pos + PCI_SRIOV_BAR + i * 4);
464 if (!res->flags)
465 continue;
466 if (resource_size(res) & (PAGE_SIZE - 1)) {
467 rc = -EIO;
468 goto failed;
469 }
470 res->end = res->start + resource_size(res) * total - 1;
471 nres++;
472 }
473
474 iov = kzalloc(sizeof(*iov), GFP_KERNEL);
475 if (!iov) {
476 rc = -ENOMEM;
477 goto failed;
478 }
479
480 iov->pos = pos;
481 iov->nres = nres;
482 iov->ctrl = ctrl;
483 iov->total = total;
484 iov->offset = offset;
485 iov->stride = stride;
486 iov->pgsz = pgsz;
487 iov->self = dev;
488 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
489 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
490
491 if (pdev)
492 iov->dev = pci_dev_get(pdev);
493 else {
494 iov->dev = dev;
495 mutex_init(&iov->lock);
496 }
497
498 dev->sriov = iov;
499 dev->is_physfn = 1;
500
501 return 0;
502
503failed:
504 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
505 res = dev->resource + PCI_IOV_RESOURCES + i;
506 res->flags = 0;
507 }
508
509 return rc;
510}
511
512static void sriov_release(struct pci_dev *dev)
513{
514 BUG_ON(dev->sriov->nr_virtfn);
515
516 if (dev == dev->sriov->dev)
517 mutex_destroy(&dev->sriov->lock);
518 else
519 pci_dev_put(dev->sriov->dev);
520
521 kfree(dev->sriov);
522 dev->sriov = NULL;
523}
524
525static void sriov_restore_state(struct pci_dev *dev)
526{
527 int i;
528 u16 ctrl;
529 struct pci_sriov *iov = dev->sriov;
530
531 pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
532 if (ctrl & PCI_SRIOV_CTRL_VFE)
533 return;
534
535 for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
536 pci_update_resource(dev, i);
537
538 pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
539 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
540 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
541 if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
542 msleep(100);
543}
544
545/**
546 * pci_iov_init - initialize the IOV capability
547 * @dev: the PCI device
548 *
549 * Returns 0 on success, or negative on failure.
550 */
551int pci_iov_init(struct pci_dev *dev)
552{
553 int pos;
554
555 if (!dev->is_pcie)
556 return -ENODEV;
557
558 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
559 if (pos)
560 return sriov_init(dev, pos);
561
562 return -ENODEV;
563}
564
565/**
566 * pci_iov_release - release resources used by the IOV capability
567 * @dev: the PCI device
568 */
569void pci_iov_release(struct pci_dev *dev)
570{
571 if (dev->is_physfn)
572 sriov_release(dev);
573}
574
575/**
576 * pci_iov_resource_bar - get position of the SR-IOV BAR
577 * @dev: the PCI device
578 * @resno: the resource number
579 * @type: the BAR type to be filled in
580 *
581 * Returns position of the BAR encapsulated in the SR-IOV capability.
582 */
583int pci_iov_resource_bar(struct pci_dev *dev, int resno,
584 enum pci_bar_type *type)
585{
586 if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
587 return 0;
588
589 BUG_ON(!dev->is_physfn);
590
591 *type = pci_bar_unknown;
592
593 return dev->sriov->pos + PCI_SRIOV_BAR +
594 4 * (resno - PCI_IOV_RESOURCES);
595}
596
597/**
598 * pci_restore_iov_state - restore the state of the IOV capability
599 * @dev: the PCI device
600 */
601void pci_restore_iov_state(struct pci_dev *dev)
602{
603 if (dev->is_physfn)
604 sriov_restore_state(dev);
605}
606
607/**
608 * pci_iov_bus_range - find bus range used by Virtual Function
609 * @bus: the PCI bus
610 *
611 * Returns max number of buses (exclude current one) used by Virtual
612 * Functions.
613 */
614int pci_iov_bus_range(struct pci_bus *bus)
615{
616 int max = 0;
617 u8 busnr;
618 struct pci_dev *dev;
619
620 list_for_each_entry(dev, &bus->devices, bus_list) {
621 if (!dev->is_physfn)
622 continue;
623 busnr = virtfn_bus(dev, dev->sriov->total - 1);
624 if (busnr > max)
625 max = busnr;
626 }
627
628 return max ? max - bus->number : 0;
629}
630
631/**
632 * pci_enable_sriov - enable the SR-IOV capability
633 * @dev: the PCI device
634 *
635 * Returns 0 on success, or negative on failure.
636 */
637int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
638{
639 might_sleep();
640
641 if (!dev->is_physfn)
642 return -ENODEV;
643
644 return sriov_enable(dev, nr_virtfn);
645}
646EXPORT_SYMBOL_GPL(pci_enable_sriov);
647
648/**
649 * pci_disable_sriov - disable the SR-IOV capability
650 * @dev: the PCI device
651 */
652void pci_disable_sriov(struct pci_dev *dev)
653{
654 might_sleep();
655
656 if (!dev->is_physfn)
657 return;
658
659 sriov_disable(dev);
660}
661EXPORT_SYMBOL_GPL(pci_disable_sriov);
662
663/**
664 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
665 * @dev: the PCI device
666 *
667 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
668 *
669 * Physical Function driver is responsible to register IRQ handler using
670 * VF Migration Interrupt Message Number, and call this function when the
671 * interrupt is generated by the hardware.
672 */
673irqreturn_t pci_sriov_migration(struct pci_dev *dev)
674{
675 if (!dev->is_physfn)
676 return IRQ_NONE;
677
678 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
679}
680EXPORT_SYMBOL_GPL(pci_sriov_migration);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index baba2eb5367d..6f2e6295e773 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -27,48 +27,53 @@ static int pci_msi_enable = 1;
27 27
28/* Arch hooks */ 28/* Arch hooks */
29 29
30int __attribute__ ((weak)) 30#ifndef arch_msi_check_device
31arch_msi_check_device(struct pci_dev *dev, int nvec, int type) 31int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
32{ 32{
33 return 0; 33 return 0;
34} 34}
35#endif
35 36
36int __attribute__ ((weak)) 37#ifndef arch_setup_msi_irqs
37arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) 38int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
38{
39 return 0;
40}
41
42int __attribute__ ((weak))
43arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
44{ 39{
45 struct msi_desc *entry; 40 struct msi_desc *entry;
46 int ret; 41 int ret;
47 42
43 /*
44 * If an architecture wants to support multiple MSI, it needs to
45 * override arch_setup_msi_irqs()
46 */
47 if (type == PCI_CAP_ID_MSI && nvec > 1)
48 return 1;
49
48 list_for_each_entry(entry, &dev->msi_list, list) { 50 list_for_each_entry(entry, &dev->msi_list, list) {
49 ret = arch_setup_msi_irq(dev, entry); 51 ret = arch_setup_msi_irq(dev, entry);
50 if (ret) 52 if (ret < 0)
51 return ret; 53 return ret;
54 if (ret > 0)
55 return -ENOSPC;
52 } 56 }
53 57
54 return 0; 58 return 0;
55} 59}
60#endif
56 61
57void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) 62#ifndef arch_teardown_msi_irqs
58{ 63void arch_teardown_msi_irqs(struct pci_dev *dev)
59 return;
60}
61
62void __attribute__ ((weak))
63arch_teardown_msi_irqs(struct pci_dev *dev)
64{ 64{
65 struct msi_desc *entry; 65 struct msi_desc *entry;
66 66
67 list_for_each_entry(entry, &dev->msi_list, list) { 67 list_for_each_entry(entry, &dev->msi_list, list) {
68 if (entry->irq != 0) 68 int i, nvec;
69 arch_teardown_msi_irq(entry->irq); 69 if (entry->irq == 0)
70 continue;
71 nvec = 1 << entry->msi_attrib.multiple;
72 for (i = 0; i < nvec; i++)
73 arch_teardown_msi_irq(entry->irq + i);
70 } 74 }
71} 75}
76#endif
72 77
73static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) 78static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
74{ 79{
@@ -111,27 +116,14 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
111 return (1 << (1 << x)) - 1; 116 return (1 << (1 << x)) - 1;
112} 117}
113 118
114static void msix_flush_writes(struct irq_desc *desc) 119static inline __attribute_const__ u32 msi_capable_mask(u16 control)
115{ 120{
116 struct msi_desc *entry; 121 return msi_mask((control >> 1) & 7);
122}
117 123
118 entry = get_irq_desc_msi(desc); 124static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
119 BUG_ON(!entry || !entry->dev); 125{
120 switch (entry->msi_attrib.type) { 126 return msi_mask((control >> 4) & 7);
121 case PCI_CAP_ID_MSI:
122 /* nothing to do */
123 break;
124 case PCI_CAP_ID_MSIX:
125 {
126 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
127 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
128 readl(entry->mask_base + offset);
129 break;
130 }
131 default:
132 BUG();
133 break;
134 }
135} 127}
136 128
137/* 129/*
@@ -143,49 +135,71 @@ static void msix_flush_writes(struct irq_desc *desc)
143 * Returns 1 if it succeeded in masking the interrupt and 0 if the device 135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
144 * doesn't support MSI masking. 136 * doesn't support MSI masking.
145 */ 137 */
146static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) 138static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
147{ 139{
148 struct msi_desc *entry; 140 u32 mask_bits = desc->masked;
149 141
150 entry = get_irq_desc_msi(desc); 142 if (!desc->msi_attrib.maskbit)
151 BUG_ON(!entry || !entry->dev); 143 return;
152 switch (entry->msi_attrib.type) { 144
153 case PCI_CAP_ID_MSI: 145 mask_bits &= ~mask;
154 if (entry->msi_attrib.maskbit) { 146 mask_bits |= flag;
155 int pos; 147 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
156 u32 mask_bits; 148 desc->masked = mask_bits;
157 149}
158 pos = (long)entry->mask_base; 150
159 pci_read_config_dword(entry->dev, pos, &mask_bits); 151/*
160 mask_bits &= ~(mask); 152 * This internal function does not flush PCI writes to the device.
161 mask_bits |= flag & mask; 153 * All users must ensure that they read from the device before either
162 pci_write_config_dword(entry->dev, pos, mask_bits); 154 * assuming that the device state is up to date, or returning out of this
163 } else { 155 * file. This saves a few milliseconds when initialising devices with lots
164 return 0; 156 * of MSI-X interrupts.
165 } 157 */
166 break; 158static void msix_mask_irq(struct msi_desc *desc, u32 flag)
167 case PCI_CAP_ID_MSIX: 159{
168 { 160 u32 mask_bits = desc->masked;
169 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 161 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
170 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 162 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
171 writel(flag, entry->mask_base + offset); 163 mask_bits &= ~1;
172 readl(entry->mask_base + offset); 164 mask_bits |= flag;
173 break; 165 writel(mask_bits, desc->mask_base + offset);
174 } 166 desc->masked = mask_bits;
175 default: 167}
176 BUG(); 168
177 break; 169static void msi_set_mask_bit(unsigned irq, u32 flag)
170{
171 struct msi_desc *desc = get_irq_msi(irq);
172
173 if (desc->msi_attrib.is_msix) {
174 msix_mask_irq(desc, flag);
175 readl(desc->mask_base); /* Flush write to device */
176 } else {
177 unsigned offset = irq - desc->dev->irq;
178 msi_mask_irq(desc, 1 << offset, flag << offset);
178 } 179 }
179 entry->msi_attrib.masked = !!flag; 180}
180 return 1; 181
182void mask_msi_irq(unsigned int irq)
183{
184 msi_set_mask_bit(irq, 1);
185}
186
187void unmask_msi_irq(unsigned int irq)
188{
189 msi_set_mask_bit(irq, 0);
181} 190}
182 191
183void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 192void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
184{ 193{
185 struct msi_desc *entry = get_irq_desc_msi(desc); 194 struct msi_desc *entry = get_irq_desc_msi(desc);
186 switch(entry->msi_attrib.type) { 195 if (entry->msi_attrib.is_msix) {
187 case PCI_CAP_ID_MSI: 196 void __iomem *base = entry->mask_base +
188 { 197 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
198
199 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
200 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
201 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
202 } else {
189 struct pci_dev *dev = entry->dev; 203 struct pci_dev *dev = entry->dev;
190 int pos = entry->msi_attrib.pos; 204 int pos = entry->msi_attrib.pos;
191 u16 data; 205 u16 data;
@@ -201,21 +215,6 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
201 pci_read_config_word(dev, msi_data_reg(pos, 0), &data); 215 pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
202 } 216 }
203 msg->data = data; 217 msg->data = data;
204 break;
205 }
206 case PCI_CAP_ID_MSIX:
207 {
208 void __iomem *base;
209 base = entry->mask_base +
210 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
211
212 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
213 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
214 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
215 break;
216 }
217 default:
218 BUG();
219 } 218 }
220} 219}
221 220
@@ -229,11 +228,25 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
229void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 228void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
230{ 229{
231 struct msi_desc *entry = get_irq_desc_msi(desc); 230 struct msi_desc *entry = get_irq_desc_msi(desc);
232 switch (entry->msi_attrib.type) { 231 if (entry->msi_attrib.is_msix) {
233 case PCI_CAP_ID_MSI: 232 void __iomem *base;
234 { 233 base = entry->mask_base +
234 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
235
236 writel(msg->address_lo,
237 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
238 writel(msg->address_hi,
239 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
240 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
241 } else {
235 struct pci_dev *dev = entry->dev; 242 struct pci_dev *dev = entry->dev;
236 int pos = entry->msi_attrib.pos; 243 int pos = entry->msi_attrib.pos;
244 u16 msgctl;
245
246 pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
247 msgctl &= ~PCI_MSI_FLAGS_QSIZE;
248 msgctl |= entry->msi_attrib.multiple << 4;
249 pci_write_config_word(dev, msi_control_reg(pos), msgctl);
237 250
238 pci_write_config_dword(dev, msi_lower_address_reg(pos), 251 pci_write_config_dword(dev, msi_lower_address_reg(pos),
239 msg->address_lo); 252 msg->address_lo);
@@ -246,23 +259,6 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
246 pci_write_config_word(dev, msi_data_reg(pos, 0), 259 pci_write_config_word(dev, msi_data_reg(pos, 0),
247 msg->data); 260 msg->data);
248 } 261 }
249 break;
250 }
251 case PCI_CAP_ID_MSIX:
252 {
253 void __iomem *base;
254 base = entry->mask_base +
255 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
256
257 writel(msg->address_lo,
258 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
259 writel(msg->address_hi,
260 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
261 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
262 break;
263 }
264 default:
265 BUG();
266 } 262 }
267 entry->msg = *msg; 263 entry->msg = *msg;
268} 264}
@@ -274,37 +270,18 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
274 write_msi_msg_desc(desc, msg); 270 write_msi_msg_desc(desc, msg);
275} 271}
276 272
277void mask_msi_irq(unsigned int irq)
278{
279 struct irq_desc *desc = irq_to_desc(irq);
280
281 msi_set_mask_bits(desc, 1, 1);
282 msix_flush_writes(desc);
283}
284
285void unmask_msi_irq(unsigned int irq)
286{
287 struct irq_desc *desc = irq_to_desc(irq);
288
289 msi_set_mask_bits(desc, 1, 0);
290 msix_flush_writes(desc);
291}
292
293static int msi_free_irqs(struct pci_dev* dev); 273static int msi_free_irqs(struct pci_dev* dev);
294 274
295static struct msi_desc* alloc_msi_entry(void) 275static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
296{ 276{
297 struct msi_desc *entry; 277 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
298 278 if (!desc)
299 entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
300 if (!entry)
301 return NULL; 279 return NULL;
302 280
303 INIT_LIST_HEAD(&entry->list); 281 INIT_LIST_HEAD(&desc->list);
304 entry->irq = 0; 282 desc->dev = dev;
305 entry->dev = NULL;
306 283
307 return entry; 284 return desc;
308} 285}
309 286
310static void pci_intx_for_msi(struct pci_dev *dev, int enable) 287static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -328,15 +305,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
328 pci_intx_for_msi(dev, 0); 305 pci_intx_for_msi(dev, 0);
329 msi_set_enable(dev, 0); 306 msi_set_enable(dev, 0);
330 write_msi_msg(dev->irq, &entry->msg); 307 write_msi_msg(dev->irq, &entry->msg);
331 if (entry->msi_attrib.maskbit) {
332 struct irq_desc *desc = irq_to_desc(dev->irq);
333 msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
334 entry->msi_attrib.masked);
335 }
336 308
337 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 309 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
310 msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
338 control &= ~PCI_MSI_FLAGS_QSIZE; 311 control &= ~PCI_MSI_FLAGS_QSIZE;
339 control |= PCI_MSI_FLAGS_ENABLE; 312 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
340 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 313 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
341} 314}
342 315
@@ -354,9 +327,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
354 msix_set_enable(dev, 0); 327 msix_set_enable(dev, 0);
355 328
356 list_for_each_entry(entry, &dev->msi_list, list) { 329 list_for_each_entry(entry, &dev->msi_list, list) {
357 struct irq_desc *desc = irq_to_desc(entry->irq);
358 write_msi_msg(entry->irq, &entry->msg); 330 write_msi_msg(entry->irq, &entry->msg);
359 msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); 331 msix_mask_irq(entry, entry->masked);
360 } 332 }
361 333
362 BUG_ON(list_empty(&dev->msi_list)); 334 BUG_ON(list_empty(&dev->msi_list));
@@ -378,52 +350,48 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
378/** 350/**
379 * msi_capability_init - configure device's MSI capability structure 351 * msi_capability_init - configure device's MSI capability structure
380 * @dev: pointer to the pci_dev data structure of MSI device function 352 * @dev: pointer to the pci_dev data structure of MSI device function
353 * @nvec: number of interrupts to allocate
381 * 354 *
382 * Setup the MSI capability structure of device function with a single 355 * Setup the MSI capability structure of the device with the requested
383 * MSI irq, regardless of device function is capable of handling 356 * number of interrupts. A return value of zero indicates the successful
384 * multiple messages. A return of zero indicates the successful setup 357 * setup of an entry with the new MSI irq. A negative return value indicates
385 * of an entry zero with the new MSI irq or non-zero for otherwise. 358 * an error, and a positive return value indicates the number of interrupts
386 **/ 359 * which could have been allocated.
387static int msi_capability_init(struct pci_dev *dev) 360 */
361static int msi_capability_init(struct pci_dev *dev, int nvec)
388{ 362{
389 struct msi_desc *entry; 363 struct msi_desc *entry;
390 int pos, ret; 364 int pos, ret;
391 u16 control; 365 u16 control;
366 unsigned mask;
392 367
393 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ 368 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
394 369
395 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
396 pci_read_config_word(dev, msi_control_reg(pos), &control); 371 pci_read_config_word(dev, msi_control_reg(pos), &control);
397 /* MSI Entry Initialization */ 372 /* MSI Entry Initialization */
398 entry = alloc_msi_entry(); 373 entry = alloc_msi_entry(dev);
399 if (!entry) 374 if (!entry)
400 return -ENOMEM; 375 return -ENOMEM;
401 376
402 entry->msi_attrib.type = PCI_CAP_ID_MSI; 377 entry->msi_attrib.is_msix = 0;
403 entry->msi_attrib.is_64 = is_64bit_address(control); 378 entry->msi_attrib.is_64 = is_64bit_address(control);
404 entry->msi_attrib.entry_nr = 0; 379 entry->msi_attrib.entry_nr = 0;
405 entry->msi_attrib.maskbit = is_mask_bit_support(control); 380 entry->msi_attrib.maskbit = is_mask_bit_support(control);
406 entry->msi_attrib.masked = 1;
407 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 381 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
408 entry->msi_attrib.pos = pos; 382 entry->msi_attrib.pos = pos;
409 entry->dev = dev; 383
410 if (entry->msi_attrib.maskbit) { 384 entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
411 unsigned int base, maskbits, temp; 385 /* All MSIs are unmasked by default, Mask them all */
412 386 if (entry->msi_attrib.maskbit)
413 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 387 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
414 entry->mask_base = (void __iomem *)(long)base; 388 mask = msi_capable_mask(control);
415 389 msi_mask_irq(entry, mask, mask);
416 /* All MSIs are unmasked by default, Mask them all */ 390
417 pci_read_config_dword(dev, base, &maskbits);
418 temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
419 maskbits |= temp;
420 pci_write_config_dword(dev, base, maskbits);
421 entry->msi_attrib.maskbits_mask = temp;
422 }
423 list_add_tail(&entry->list, &dev->msi_list); 391 list_add_tail(&entry->list, &dev->msi_list);
424 392
425 /* Configure MSI capability structure */ 393 /* Configure MSI capability structure */
426 ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 394 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
427 if (ret) { 395 if (ret) {
428 msi_free_irqs(dev); 396 msi_free_irqs(dev);
429 return ret; 397 return ret;
@@ -476,26 +444,28 @@ static int msix_capability_init(struct pci_dev *dev,
476 444
477 /* MSI-X Table Initialization */ 445 /* MSI-X Table Initialization */
478 for (i = 0; i < nvec; i++) { 446 for (i = 0; i < nvec; i++) {
479 entry = alloc_msi_entry(); 447 entry = alloc_msi_entry(dev);
480 if (!entry) 448 if (!entry)
481 break; 449 break;
482 450
483 j = entries[i].entry; 451 j = entries[i].entry;
484 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 452 entry->msi_attrib.is_msix = 1;
485 entry->msi_attrib.is_64 = 1; 453 entry->msi_attrib.is_64 = 1;
486 entry->msi_attrib.entry_nr = j; 454 entry->msi_attrib.entry_nr = j;
487 entry->msi_attrib.maskbit = 1;
488 entry->msi_attrib.masked = 1;
489 entry->msi_attrib.default_irq = dev->irq; 455 entry->msi_attrib.default_irq = dev->irq;
490 entry->msi_attrib.pos = pos; 456 entry->msi_attrib.pos = pos;
491 entry->dev = dev;
492 entry->mask_base = base; 457 entry->mask_base = base;
458 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
459 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
460 msix_mask_irq(entry, 1);
493 461
494 list_add_tail(&entry->list, &dev->msi_list); 462 list_add_tail(&entry->list, &dev->msi_list);
495 } 463 }
496 464
497 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 465 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
498 if (ret) { 466 if (ret < 0) {
467 /* If we had some success report the number of irqs
468 * we succeeded in setting up. */
499 int avail = 0; 469 int avail = 0;
500 list_for_each_entry(entry, &dev->msi_list, list) { 470 list_for_each_entry(entry, &dev->msi_list, list) {
501 if (entry->irq != 0) { 471 if (entry->irq != 0) {
@@ -503,14 +473,13 @@ static int msix_capability_init(struct pci_dev *dev,
503 } 473 }
504 } 474 }
505 475
506 msi_free_irqs(dev); 476 if (avail != 0)
477 ret = avail;
478 }
507 479
508 /* If we had some success report the number of irqs 480 if (ret) {
509 * we succeeded in setting up. 481 msi_free_irqs(dev);
510 */ 482 return ret;
511 if (avail == 0)
512 avail = ret;
513 return avail;
514 } 483 }
515 484
516 i = 0; 485 i = 0;
@@ -575,39 +544,54 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
575} 544}
576 545
577/** 546/**
578 * pci_enable_msi - configure device's MSI capability structure 547 * pci_enable_msi_block - configure device's MSI capability structure
579 * @dev: pointer to the pci_dev data structure of MSI device function 548 * @dev: device to configure
549 * @nvec: number of interrupts to configure
580 * 550 *
581 * Setup the MSI capability structure of device function with 551 * Allocate IRQs for a device with the MSI capability.
582 * a single MSI irq upon its software driver call to request for 552 * This function returns a negative errno if an error occurs. If it
583 * MSI mode enabled on its hardware device function. A return of zero 553 * is unable to allocate the number of interrupts requested, it returns
584 * indicates the successful setup of an entry zero with the new MSI 554 * the number of interrupts it might be able to allocate. If it successfully
585 * irq or non-zero for otherwise. 555 * allocates at least the number of interrupts requested, it returns 0 and
586 **/ 556 * updates the @dev's irq member to the lowest new interrupt number; the
587int pci_enable_msi(struct pci_dev* dev) 557 * other interrupt numbers allocated to this device are consecutive.
558 */
559int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
588{ 560{
589 int status; 561 int status, pos, maxvec;
562 u16 msgctl;
590 563
591 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 564 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
565 if (!pos)
566 return -EINVAL;
567 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
568 maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
569 if (nvec > maxvec)
570 return maxvec;
571
572 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
592 if (status) 573 if (status)
593 return status; 574 return status;
594 575
595 WARN_ON(!!dev->msi_enabled); 576 WARN_ON(!!dev->msi_enabled);
596 577
597 /* Check whether driver already requested for MSI-X irqs */ 578 /* Check whether driver already requested MSI-X irqs */
598 if (dev->msix_enabled) { 579 if (dev->msix_enabled) {
599 dev_info(&dev->dev, "can't enable MSI " 580 dev_info(&dev->dev, "can't enable MSI "
600 "(MSI-X already enabled)\n"); 581 "(MSI-X already enabled)\n");
601 return -EINVAL; 582 return -EINVAL;
602 } 583 }
603 status = msi_capability_init(dev); 584
585 status = msi_capability_init(dev, nvec);
604 return status; 586 return status;
605} 587}
606EXPORT_SYMBOL(pci_enable_msi); 588EXPORT_SYMBOL(pci_enable_msi_block);
607 589
608void pci_msi_shutdown(struct pci_dev* dev) 590void pci_msi_shutdown(struct pci_dev *dev)
609{ 591{
610 struct msi_desc *entry; 592 struct msi_desc *desc;
593 u32 mask;
594 u16 ctrl;
611 595
612 if (!pci_msi_enable || !dev || !dev->msi_enabled) 596 if (!pci_msi_enable || !dev || !dev->msi_enabled)
613 return; 597 return;
@@ -617,19 +601,15 @@ void pci_msi_shutdown(struct pci_dev* dev)
617 dev->msi_enabled = 0; 601 dev->msi_enabled = 0;
618 602
619 BUG_ON(list_empty(&dev->msi_list)); 603 BUG_ON(list_empty(&dev->msi_list));
620 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 604 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
621 /* Return the the pci reset with msi irqs unmasked */ 605 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
622 if (entry->msi_attrib.maskbit) { 606 mask = msi_capable_mask(ctrl);
623 u32 mask = entry->msi_attrib.maskbits_mask; 607 msi_mask_irq(desc, mask, ~mask);
624 struct irq_desc *desc = irq_to_desc(dev->irq);
625 msi_set_mask_bits(desc, mask, ~mask);
626 }
627 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
628 return;
629 608
630 /* Restore dev->irq to its default pin-assertion irq */ 609 /* Restore dev->irq to its default pin-assertion irq */
631 dev->irq = entry->msi_attrib.default_irq; 610 dev->irq = desc->msi_attrib.default_irq;
632} 611}
612
633void pci_disable_msi(struct pci_dev* dev) 613void pci_disable_msi(struct pci_dev* dev)
634{ 614{
635 struct msi_desc *entry; 615 struct msi_desc *entry;
@@ -640,7 +620,7 @@ void pci_disable_msi(struct pci_dev* dev)
640 pci_msi_shutdown(dev); 620 pci_msi_shutdown(dev);
641 621
642 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 622 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
643 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 623 if (entry->msi_attrib.is_msix)
644 return; 624 return;
645 625
646 msi_free_irqs(dev); 626 msi_free_irqs(dev);
@@ -652,14 +632,18 @@ static int msi_free_irqs(struct pci_dev* dev)
652 struct msi_desc *entry, *tmp; 632 struct msi_desc *entry, *tmp;
653 633
654 list_for_each_entry(entry, &dev->msi_list, list) { 634 list_for_each_entry(entry, &dev->msi_list, list) {
655 if (entry->irq) 635 int i, nvec;
656 BUG_ON(irq_has_action(entry->irq)); 636 if (!entry->irq)
637 continue;
638 nvec = 1 << entry->msi_attrib.multiple;
639 for (i = 0; i < nvec; i++)
640 BUG_ON(irq_has_action(entry->irq + i));
657 } 641 }
658 642
659 arch_teardown_msi_irqs(dev); 643 arch_teardown_msi_irqs(dev);
660 644
661 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 645 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
662 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 646 if (entry->msi_attrib.is_msix) {
663 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 647 writel(1, entry->mask_base + entry->msi_attrib.entry_nr
664 * PCI_MSIX_ENTRY_SIZE 648 * PCI_MSIX_ENTRY_SIZE
665 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 649 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
@@ -675,6 +659,23 @@ static int msi_free_irqs(struct pci_dev* dev)
675} 659}
676 660
677/** 661/**
662 * pci_msix_table_size - return the number of device's MSI-X table entries
663 * @dev: pointer to the pci_dev data structure of MSI-X device function
664 */
665int pci_msix_table_size(struct pci_dev *dev)
666{
667 int pos;
668 u16 control;
669
670 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
671 if (!pos)
672 return 0;
673
674 pci_read_config_word(dev, msi_control_reg(pos), &control);
675 return multi_msix_capable(control);
676}
677
678/**
678 * pci_enable_msix - configure device's MSI-X capability structure 679 * pci_enable_msix - configure device's MSI-X capability structure
679 * @dev: pointer to the pci_dev data structure of MSI-X device function 680 * @dev: pointer to the pci_dev data structure of MSI-X device function
680 * @entries: pointer to an array of MSI-X entries 681 * @entries: pointer to an array of MSI-X entries
@@ -691,9 +692,8 @@ static int msi_free_irqs(struct pci_dev* dev)
691 **/ 692 **/
692int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 693int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
693{ 694{
694 int status, pos, nr_entries; 695 int status, nr_entries;
695 int i, j; 696 int i, j;
696 u16 control;
697 697
698 if (!entries) 698 if (!entries)
699 return -EINVAL; 699 return -EINVAL;
@@ -702,9 +702,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
702 if (status) 702 if (status)
703 return status; 703 return status;
704 704
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 705 nr_entries = pci_msix_table_size(dev);
706 pci_read_config_word(dev, msi_control_reg(pos), &control);
707 nr_entries = multi_msix_capable(control);
708 if (nvec > nr_entries) 706 if (nvec > nr_entries)
709 return -EINVAL; 707 return -EINVAL;
710 708
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 3898f5237144..71f4df2ef654 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -20,14 +20,8 @@
20#define msi_mask_bits_reg(base, is64bit) \ 20#define msi_mask_bits_reg(base, is64bit) \
21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) 21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE 22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
23#define multi_msi_capable(control) \
24 (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
25#define multi_msi_enable(control, num) \
26 control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
27#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 23#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
28#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 24#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
29#define msi_enable(control, num) multi_msi_enable(control, num); \
30 control |= PCI_MSI_FLAGS_ENABLE
31 25
32#define msix_table_offset_reg(base) (base + 0x04) 26#define msix_table_offset_reg(base) (base + 0x04)
33#define msix_pba_offset_reg(base) (base + 0x08) 27#define msix_pba_offset_reg(base) (base + 0x08)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index deea8a187eb8..fac5eddcefd2 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,221 +18,6 @@
18#include <linux/pci-acpi.h> 18#include <linux/pci-acpi.h>
19#include "pci.h" 19#include "pci.h"
20 20
21struct acpi_osc_data {
22 acpi_handle handle;
23 u32 support_set;
24 u32 control_set;
25 u32 control_query;
26 int is_queried;
27 struct list_head sibiling;
28};
29static LIST_HEAD(acpi_osc_data_list);
30
31struct acpi_osc_args {
32 u32 capbuf[3];
33};
34
35static DEFINE_MUTEX(pci_acpi_lock);
36
37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
38{
39 struct acpi_osc_data *data;
40
41 list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
42 if (data->handle == handle)
43 return data;
44 }
45 data = kzalloc(sizeof(*data), GFP_KERNEL);
46 if (!data)
47 return NULL;
48 INIT_LIST_HEAD(&data->sibiling);
49 data->handle = handle;
50 list_add_tail(&data->sibiling, &acpi_osc_data_list);
51 return data;
52}
53
54static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
55 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
56
57static acpi_status acpi_run_osc(acpi_handle handle,
58 struct acpi_osc_args *osc_args, u32 *retval)
59{
60 acpi_status status;
61 struct acpi_object_list input;
62 union acpi_object in_params[4];
63 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
64 union acpi_object *out_obj;
65 u32 errors, flags = osc_args->capbuf[OSC_QUERY_TYPE];
66
67 /* Setting up input parameters */
68 input.count = 4;
69 input.pointer = in_params;
70 in_params[0].type = ACPI_TYPE_BUFFER;
71 in_params[0].buffer.length = 16;
72 in_params[0].buffer.pointer = OSC_UUID;
73 in_params[1].type = ACPI_TYPE_INTEGER;
74 in_params[1].integer.value = 1;
75 in_params[2].type = ACPI_TYPE_INTEGER;
76 in_params[2].integer.value = 3;
77 in_params[3].type = ACPI_TYPE_BUFFER;
78 in_params[3].buffer.length = 12;
79 in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
80
81 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
82 if (ACPI_FAILURE(status))
83 return status;
84
85 if (!output.length)
86 return AE_NULL_OBJECT;
87
88 out_obj = output.pointer;
89 if (out_obj->type != ACPI_TYPE_BUFFER) {
90 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
91 status = AE_TYPE;
92 goto out_kfree;
93 }
94 /* Need to ignore the bit0 in result code */
95 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
96 if (errors) {
97 if (errors & OSC_REQUEST_ERROR)
98 printk(KERN_DEBUG "_OSC request fails\n");
99 if (errors & OSC_INVALID_UUID_ERROR)
100 printk(KERN_DEBUG "_OSC invalid UUID\n");
101 if (errors & OSC_INVALID_REVISION_ERROR)
102 printk(KERN_DEBUG "_OSC invalid revision\n");
103 if (errors & OSC_CAPABILITIES_MASK_ERROR) {
104 if (flags & OSC_QUERY_ENABLE)
105 goto out_success;
106 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
107 status = AE_SUPPORT;
108 goto out_kfree;
109 }
110 status = AE_ERROR;
111 goto out_kfree;
112 }
113out_success:
114 *retval = *((u32 *)(out_obj->buffer.pointer + 8));
115 status = AE_OK;
116
117out_kfree:
118 kfree(output.pointer);
119 return status;
120}
121
122static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
123{
124 acpi_status status;
125 u32 support_set, result;
126 struct acpi_osc_args osc_args;
127
128 /* do _OSC query for all possible controls */
129 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
130 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
131 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
132 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
133
134 status = acpi_run_osc(osc_data->handle, &osc_args, &result);
135 if (ACPI_SUCCESS(status)) {
136 osc_data->support_set = support_set;
137 osc_data->control_query = result;
138 osc_data->is_queried = 1;
139 }
140
141 return status;
142}
143
144/*
145 * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
146 * @flags: Bitmask of flags to support
147 *
148 * See the ACPI spec for the definition of the flags
149 */
150int pci_acpi_osc_support(acpi_handle handle, u32 flags)
151{
152 acpi_status status;
153 acpi_handle tmp;
154 struct acpi_osc_data *osc_data;
155 int rc = 0;
156
157 status = acpi_get_handle(handle, "_OSC", &tmp);
158 if (ACPI_FAILURE(status))
159 return -ENOTTY;
160
161 mutex_lock(&pci_acpi_lock);
162 osc_data = acpi_get_osc_data(handle);
163 if (!osc_data) {
164 printk(KERN_ERR "acpi osc data array is full\n");
165 rc = -ENOMEM;
166 goto out;
167 }
168
169 __acpi_query_osc(flags, osc_data);
170out:
171 mutex_unlock(&pci_acpi_lock);
172 return rc;
173}
174
175/**
176 * pci_osc_control_set - commit requested control to Firmware
177 * @handle: acpi_handle for the target ACPI object
178 * @flags: driver's requested control bits
179 *
180 * Attempt to take control from Firmware on requested control bits.
181 **/
182acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
183{
184 acpi_status status;
185 u32 control_req, control_set, result;
186 acpi_handle tmp;
187 struct acpi_osc_data *osc_data;
188 struct acpi_osc_args osc_args;
189
190 status = acpi_get_handle(handle, "_OSC", &tmp);
191 if (ACPI_FAILURE(status))
192 return status;
193
194 mutex_lock(&pci_acpi_lock);
195 osc_data = acpi_get_osc_data(handle);
196 if (!osc_data) {
197 printk(KERN_ERR "acpi osc data array is full\n");
198 status = AE_ERROR;
199 goto out;
200 }
201
202 control_req = (flags & OSC_CONTROL_MASKS);
203 if (!control_req) {
204 status = AE_TYPE;
205 goto out;
206 }
207
208 /* No need to evaluate _OSC if the control was already granted. */
209 if ((osc_data->control_set & control_req) == control_req)
210 goto out;
211
212 if (!osc_data->is_queried) {
213 status = __acpi_query_osc(osc_data->support_set, osc_data);
214 if (ACPI_FAILURE(status))
215 goto out;
216 }
217
218 if ((osc_data->control_query & control_req) != control_req) {
219 status = AE_SUPPORT;
220 goto out;
221 }
222
223 control_set = osc_data->control_set | control_req;
224 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
225 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
226 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
227 status = acpi_run_osc(handle, &osc_args, &result);
228 if (ACPI_SUCCESS(status))
229 osc_data->control_set = result;
230out:
231 mutex_unlock(&pci_acpi_lock);
232 return status;
233}
234EXPORT_SYMBOL(pci_osc_control_set);
235
236/* 21/*
237 * _SxD returns the D-state with the highest power 22 * _SxD returns the D-state with the highest power
238 * (lowest D-state number) supported in the S-state "x". 23 * (lowest D-state number) supported in the S-state "x".
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 267de88551c9..c0cbbb5a245e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -99,6 +99,52 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
99} 99}
100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
101 101
102/**
103 * store_remove_id - remove a PCI device ID from this driver
104 * @driver: target device driver
105 * @buf: buffer for scanning device ID data
106 * @count: input size
107 *
108 * Removes a dynamic pci device ID to this driver.
109 */
110static ssize_t
111store_remove_id(struct device_driver *driver, const char *buf, size_t count)
112{
113 struct pci_dynid *dynid, *n;
114 struct pci_driver *pdrv = to_pci_driver(driver);
115 __u32 vendor, device, subvendor = PCI_ANY_ID,
116 subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
117 int fields = 0;
118 int retval = -ENODEV;
119
120 fields = sscanf(buf, "%x %x %x %x %x %x",
121 &vendor, &device, &subvendor, &subdevice,
122 &class, &class_mask);
123 if (fields < 2)
124 return -EINVAL;
125
126 spin_lock(&pdrv->dynids.lock);
127 list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
128 struct pci_device_id *id = &dynid->id;
129 if ((id->vendor == vendor) &&
130 (id->device == device) &&
131 (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
132 (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
133 !((id->class ^ class) & class_mask)) {
134 list_del(&dynid->node);
135 kfree(dynid);
136 retval = 0;
137 break;
138 }
139 }
140 spin_unlock(&pdrv->dynids.lock);
141
142 if (retval)
143 return retval;
144 return count;
145}
146static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
147
102static void 148static void
103pci_free_dynids(struct pci_driver *drv) 149pci_free_dynids(struct pci_driver *drv)
104{ 150{
@@ -125,6 +171,20 @@ static void pci_remove_newid_file(struct pci_driver *drv)
125{ 171{
126 driver_remove_file(&drv->driver, &driver_attr_new_id); 172 driver_remove_file(&drv->driver, &driver_attr_new_id);
127} 173}
174
175static int
176pci_create_removeid_file(struct pci_driver *drv)
177{
178 int error = 0;
179 if (drv->probe != NULL)
180 error = driver_create_file(&drv->driver,&driver_attr_remove_id);
181 return error;
182}
183
184static void pci_remove_removeid_file(struct pci_driver *drv)
185{
186 driver_remove_file(&drv->driver, &driver_attr_remove_id);
187}
128#else /* !CONFIG_HOTPLUG */ 188#else /* !CONFIG_HOTPLUG */
129static inline void pci_free_dynids(struct pci_driver *drv) {} 189static inline void pci_free_dynids(struct pci_driver *drv) {}
130static inline int pci_create_newid_file(struct pci_driver *drv) 190static inline int pci_create_newid_file(struct pci_driver *drv)
@@ -132,6 +192,11 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
132 return 0; 192 return 0;
133} 193}
134static inline void pci_remove_newid_file(struct pci_driver *drv) {} 194static inline void pci_remove_newid_file(struct pci_driver *drv) {}
195static inline int pci_create_removeid_file(struct pci_driver *drv)
196{
197 return 0;
198}
199static inline void pci_remove_removeid_file(struct pci_driver *drv) {}
135#endif 200#endif
136 201
137/** 202/**
@@ -899,13 +964,23 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
899 /* register with core */ 964 /* register with core */
900 error = driver_register(&drv->driver); 965 error = driver_register(&drv->driver);
901 if (error) 966 if (error)
902 return error; 967 goto out;
903 968
904 error = pci_create_newid_file(drv); 969 error = pci_create_newid_file(drv);
905 if (error) 970 if (error)
906 driver_unregister(&drv->driver); 971 goto out_newid;
907 972
973 error = pci_create_removeid_file(drv);
974 if (error)
975 goto out_removeid;
976out:
908 return error; 977 return error;
978
979out_removeid:
980 pci_remove_newid_file(drv);
981out_newid:
982 driver_unregister(&drv->driver);
983 goto out;
909} 984}
910 985
911/** 986/**
@@ -921,6 +996,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
921void 996void
922pci_unregister_driver(struct pci_driver *drv) 997pci_unregister_driver(struct pci_driver *drv)
923{ 998{
999 pci_remove_removeid_file(drv);
924 pci_remove_newid_file(drv); 1000 pci_remove_newid_file(drv);
925 driver_unregister(&drv->driver); 1001 driver_unregister(&drv->driver);
926 pci_free_dynids(drv); 1002 pci_free_dynids(drv);
@@ -1020,6 +1096,7 @@ struct bus_type pci_bus_type = {
1020 .remove = pci_device_remove, 1096 .remove = pci_device_remove,
1021 .shutdown = pci_device_shutdown, 1097 .shutdown = pci_device_shutdown,
1022 .dev_attrs = pci_dev_attrs, 1098 .dev_attrs = pci_dev_attrs,
1099 .bus_attrs = pci_bus_attrs,
1023 .pm = PCI_PM_OPS_PTR, 1100 .pm = PCI_PM_OPS_PTR,
1024}; 1101};
1025 1102
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index dfc4e0ddf241..e9a8706a6401 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -219,6 +219,83 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
219 return count; 219 return count;
220} 220}
221 221
222#ifdef CONFIG_HOTPLUG
223static DEFINE_MUTEX(pci_remove_rescan_mutex);
224static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
225 size_t count)
226{
227 unsigned long val;
228 struct pci_bus *b = NULL;
229
230 if (strict_strtoul(buf, 0, &val) < 0)
231 return -EINVAL;
232
233 if (val) {
234 mutex_lock(&pci_remove_rescan_mutex);
235 while ((b = pci_find_next_bus(b)) != NULL)
236 pci_rescan_bus(b);
237 mutex_unlock(&pci_remove_rescan_mutex);
238 }
239 return count;
240}
241
242struct bus_attribute pci_bus_attrs[] = {
243 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
244 __ATTR_NULL
245};
246
247static ssize_t
248dev_rescan_store(struct device *dev, struct device_attribute *attr,
249 const char *buf, size_t count)
250{
251 unsigned long val;
252 struct pci_dev *pdev = to_pci_dev(dev);
253
254 if (strict_strtoul(buf, 0, &val) < 0)
255 return -EINVAL;
256
257 if (val) {
258 mutex_lock(&pci_remove_rescan_mutex);
259 pci_rescan_bus(pdev->bus);
260 mutex_unlock(&pci_remove_rescan_mutex);
261 }
262 return count;
263}
264
265static void remove_callback(struct device *dev)
266{
267 struct pci_dev *pdev = to_pci_dev(dev);
268
269 mutex_lock(&pci_remove_rescan_mutex);
270 pci_remove_bus_device(pdev);
271 mutex_unlock(&pci_remove_rescan_mutex);
272}
273
274static ssize_t
275remove_store(struct device *dev, struct device_attribute *dummy,
276 const char *buf, size_t count)
277{
278 int ret = 0;
279 unsigned long val;
280 struct pci_dev *pdev = to_pci_dev(dev);
281
282 if (strict_strtoul(buf, 0, &val) < 0)
283 return -EINVAL;
284
285 if (pci_is_root_bus(pdev->bus))
286 return -EBUSY;
287
288 /* An attribute cannot be unregistered by one of its own methods,
289 * so we have to use this roundabout approach.
290 */
291 if (val)
292 ret = device_schedule_callback(dev, remove_callback);
293 if (ret)
294 count = ret;
295 return count;
296}
297#endif
298
222struct device_attribute pci_dev_attrs[] = { 299struct device_attribute pci_dev_attrs[] = {
223 __ATTR_RO(resource), 300 __ATTR_RO(resource),
224 __ATTR_RO(vendor), 301 __ATTR_RO(vendor),
@@ -237,10 +314,25 @@ struct device_attribute pci_dev_attrs[] = {
237 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), 314 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
238 broken_parity_status_show,broken_parity_status_store), 315 broken_parity_status_show,broken_parity_status_store),
239 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store), 316 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
317#ifdef CONFIG_HOTPLUG
318 __ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
319 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
320#endif
240 __ATTR_NULL, 321 __ATTR_NULL,
241}; 322};
242 323
243static ssize_t 324static ssize_t
325boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
326{
327 struct pci_dev *pdev = to_pci_dev(dev);
328
329 return sprintf(buf, "%u\n",
330 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
331 IORESOURCE_ROM_SHADOW));
332}
333struct device_attribute vga_attr = __ATTR_RO(boot_vga);
334
335static ssize_t
244pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, 336pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
245 char *buf, loff_t off, size_t count) 337 char *buf, loff_t off, size_t count)
246{ 338{
@@ -493,6 +585,19 @@ pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
493} 585}
494 586
495/** 587/**
588 * pci_adjust_legacy_attr - adjustment of legacy file attributes
589 * @b: bus to create files under
590 * @mmap_type: I/O port or memory
591 *
592 * Stub implementation. Can be overridden by arch if necessary.
593 */
594void __weak
595pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
596{
597 return;
598}
599
600/**
496 * pci_create_legacy_files - create legacy I/O port and memory files 601 * pci_create_legacy_files - create legacy I/O port and memory files
497 * @b: bus to create files under 602 * @b: bus to create files under
498 * 603 *
@@ -518,6 +623,7 @@ void pci_create_legacy_files(struct pci_bus *b)
518 b->legacy_io->read = pci_read_legacy_io; 623 b->legacy_io->read = pci_read_legacy_io;
519 b->legacy_io->write = pci_write_legacy_io; 624 b->legacy_io->write = pci_write_legacy_io;
520 b->legacy_io->mmap = pci_mmap_legacy_io; 625 b->legacy_io->mmap = pci_mmap_legacy_io;
626 pci_adjust_legacy_attr(b, pci_mmap_io);
521 error = device_create_bin_file(&b->dev, b->legacy_io); 627 error = device_create_bin_file(&b->dev, b->legacy_io);
522 if (error) 628 if (error)
523 goto legacy_io_err; 629 goto legacy_io_err;
@@ -528,6 +634,7 @@ void pci_create_legacy_files(struct pci_bus *b)
528 b->legacy_mem->size = 1024*1024; 634 b->legacy_mem->size = 1024*1024;
529 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 635 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
530 b->legacy_mem->mmap = pci_mmap_legacy_mem; 636 b->legacy_mem->mmap = pci_mmap_legacy_mem;
637 pci_adjust_legacy_attr(b, pci_mmap_mem);
531 error = device_create_bin_file(&b->dev, b->legacy_mem); 638 error = device_create_bin_file(&b->dev, b->legacy_mem);
532 if (error) 639 if (error)
533 goto legacy_mem_err; 640 goto legacy_mem_err;
@@ -719,8 +826,8 @@ static int pci_create_resource_files(struct pci_dev *pdev)
719 return 0; 826 return 0;
720} 827}
721#else /* !HAVE_PCI_MMAP */ 828#else /* !HAVE_PCI_MMAP */
722static inline int pci_create_resource_files(struct pci_dev *dev) { return 0; } 829int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
723static inline void pci_remove_resource_files(struct pci_dev *dev) { return; } 830void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
724#endif /* HAVE_PCI_MMAP */ 831#endif /* HAVE_PCI_MMAP */
725 832
726/** 833/**
@@ -884,18 +991,27 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
884 pdev->rom_attr = attr; 991 pdev->rom_attr = attr;
885 } 992 }
886 993
994 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
995 retval = device_create_file(&pdev->dev, &vga_attr);
996 if (retval)
997 goto err_rom_file;
998 }
999
887 /* add platform-specific attributes */ 1000 /* add platform-specific attributes */
888 retval = pcibios_add_platform_entries(pdev); 1001 retval = pcibios_add_platform_entries(pdev);
889 if (retval) 1002 if (retval)
890 goto err_rom_file; 1003 goto err_vga_file;
891 1004
892 /* add sysfs entries for various capabilities */ 1005 /* add sysfs entries for various capabilities */
893 retval = pci_create_capabilities_sysfs(pdev); 1006 retval = pci_create_capabilities_sysfs(pdev);
894 if (retval) 1007 if (retval)
895 goto err_rom_file; 1008 goto err_vga_file;
896 1009
897 return 0; 1010 return 0;
898 1011
1012err_vga_file:
1013 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
1014 device_remove_file(&pdev->dev, &vga_attr);
899err_rom_file: 1015err_rom_file:
900 if (rom_size) { 1016 if (rom_size) {
901 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 1017 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0195066251e5..fe7ac2cea7c9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -20,6 +20,8 @@
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include <linux/device.h>
24#include <asm/setup.h>
23#include "pci.h" 25#include "pci.h"
24 26
25unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 27unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
@@ -677,6 +679,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
677 679
678EXPORT_SYMBOL(pci_choose_state); 680EXPORT_SYMBOL(pci_choose_state);
679 681
682#define PCI_EXP_SAVE_REGS 7
683
680static int pci_save_pcie_state(struct pci_dev *dev) 684static int pci_save_pcie_state(struct pci_dev *dev)
681{ 685{
682 int pos, i = 0; 686 int pos, i = 0;
@@ -689,7 +693,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
689 693
690 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 694 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
691 if (!save_state) { 695 if (!save_state) {
692 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 696 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
693 return -ENOMEM; 697 return -ENOMEM;
694 } 698 }
695 cap = (u16 *)&save_state->data[0]; 699 cap = (u16 *)&save_state->data[0];
@@ -698,6 +702,9 @@ static int pci_save_pcie_state(struct pci_dev *dev)
698 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 702 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
699 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 703 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
700 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 704 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
705 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
706 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
707 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
701 708
702 return 0; 709 return 0;
703} 710}
@@ -718,6 +725,9 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
718 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); 725 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
719 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); 726 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
720 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); 727 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
728 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
729 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
730 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
721} 731}
722 732
723 733
@@ -732,7 +742,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
732 742
733 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 743 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
734 if (!save_state) { 744 if (!save_state) {
735 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 745 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
736 return -ENOMEM; 746 return -ENOMEM;
737 } 747 }
738 748
@@ -805,6 +815,7 @@ pci_restore_state(struct pci_dev *dev)
805 } 815 }
806 pci_restore_pcix_state(dev); 816 pci_restore_pcix_state(dev);
807 pci_restore_msi_state(dev); 817 pci_restore_msi_state(dev);
818 pci_restore_iov_state(dev);
808 819
809 return 0; 820 return 0;
810} 821}
@@ -1401,7 +1412,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1401{ 1412{
1402 int error; 1413 int error;
1403 1414
1404 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); 1415 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1416 PCI_EXP_SAVE_REGS * sizeof(u16));
1405 if (error) 1417 if (error)
1406 dev_err(&dev->dev, 1418 dev_err(&dev->dev,
1407 "unable to preallocate PCI Express save buffer\n"); 1419 "unable to preallocate PCI Express save buffer\n");
@@ -1472,7 +1484,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1472 if (!pin) 1484 if (!pin)
1473 return -1; 1485 return -1;
1474 1486
1475 while (dev->bus->self) { 1487 while (dev->bus->parent) {
1476 pin = pci_swizzle_interrupt_pin(dev, pin); 1488 pin = pci_swizzle_interrupt_pin(dev, pin);
1477 dev = dev->bus->self; 1489 dev = dev->bus->self;
1478 } 1490 }
@@ -1492,7 +1504,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1492{ 1504{
1493 u8 pin = *pinp; 1505 u8 pin = *pinp;
1494 1506
1495 while (dev->bus->self) { 1507 while (dev->bus->parent) {
1496 pin = pci_swizzle_interrupt_pin(dev, pin); 1508 pin = pci_swizzle_interrupt_pin(dev, pin);
1497 dev = dev->bus->self; 1509 dev = dev->bus->self;
1498 } 1510 }
@@ -2016,18 +2028,24 @@ static int __pcie_flr(struct pci_dev *dev, int probe)
2016 pci_block_user_cfg_access(dev); 2028 pci_block_user_cfg_access(dev);
2017 2029
2018 /* Wait for Transaction Pending bit clean */ 2030 /* Wait for Transaction Pending bit clean */
2031 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2032 if (!(status & PCI_EXP_DEVSTA_TRPND))
2033 goto transaction_done;
2034
2019 msleep(100); 2035 msleep(100);
2020 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2036 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2021 if (status & PCI_EXP_DEVSTA_TRPND) { 2037 if (!(status & PCI_EXP_DEVSTA_TRPND))
2022 dev_info(&dev->dev, "Busy after 100ms while trying to reset; " 2038 goto transaction_done;
2039
2040 dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
2023 "sleeping for 1 second\n"); 2041 "sleeping for 1 second\n");
2024 ssleep(1); 2042 ssleep(1);
2025 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2043 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2026 if (status & PCI_EXP_DEVSTA_TRPND) 2044 if (status & PCI_EXP_DEVSTA_TRPND)
2027 dev_info(&dev->dev, "Still busy after 1s; " 2045 dev_info(&dev->dev, "Still busy after 1s; "
2028 "proceeding with reset anyway\n"); 2046 "proceeding with reset anyway\n");
2029 }
2030 2047
2048transaction_done:
2031 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL, 2049 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
2032 PCI_EXP_DEVCTL_BCR_FLR); 2050 PCI_EXP_DEVCTL_BCR_FLR);
2033 mdelay(100); 2051 mdelay(100);
@@ -2054,18 +2072,24 @@ static int __pci_af_flr(struct pci_dev *dev, int probe)
2054 pci_block_user_cfg_access(dev); 2072 pci_block_user_cfg_access(dev);
2055 2073
2056 /* Wait for Transaction Pending bit clean */ 2074 /* Wait for Transaction Pending bit clean */
2075 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2076 if (!(status & PCI_AF_STATUS_TP))
2077 goto transaction_done;
2078
2057 msleep(100); 2079 msleep(100);
2058 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2080 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2059 if (status & PCI_AF_STATUS_TP) { 2081 if (!(status & PCI_AF_STATUS_TP))
2060 dev_info(&dev->dev, "Busy after 100ms while trying to" 2082 goto transaction_done;
2061 " reset; sleeping for 1 second\n"); 2083
2062 ssleep(1); 2084 dev_info(&dev->dev, "Busy after 100ms while trying to"
2063 pci_read_config_byte(dev, 2085 " reset; sleeping for 1 second\n");
2064 cappos + PCI_AF_STATUS, &status); 2086 ssleep(1);
2065 if (status & PCI_AF_STATUS_TP) 2087 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2066 dev_info(&dev->dev, "Still busy after 1s; " 2088 if (status & PCI_AF_STATUS_TP)
2067 "proceeding with reset anyway\n"); 2089 dev_info(&dev->dev, "Still busy after 1s; "
2068 } 2090 "proceeding with reset anyway\n");
2091
2092transaction_done:
2069 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 2093 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2070 mdelay(100); 2094 mdelay(100);
2071 2095
@@ -2334,18 +2358,140 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2334 */ 2358 */
2335int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 2359int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2336{ 2360{
2361 int reg;
2362
2337 if (resno < PCI_ROM_RESOURCE) { 2363 if (resno < PCI_ROM_RESOURCE) {
2338 *type = pci_bar_unknown; 2364 *type = pci_bar_unknown;
2339 return PCI_BASE_ADDRESS_0 + 4 * resno; 2365 return PCI_BASE_ADDRESS_0 + 4 * resno;
2340 } else if (resno == PCI_ROM_RESOURCE) { 2366 } else if (resno == PCI_ROM_RESOURCE) {
2341 *type = pci_bar_mem32; 2367 *type = pci_bar_mem32;
2342 return dev->rom_base_reg; 2368 return dev->rom_base_reg;
2369 } else if (resno < PCI_BRIDGE_RESOURCES) {
2370 /* device specific resource */
2371 reg = pci_iov_resource_bar(dev, resno, type);
2372 if (reg)
2373 return reg;
2343 } 2374 }
2344 2375
2345 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2376 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
2346 return 0; 2377 return 0;
2347} 2378}
2348 2379
2380#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2381static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2382spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
2383
2384/**
2385 * pci_specified_resource_alignment - get resource alignment specified by user.
2386 * @dev: the PCI device to get
2387 *
2388 * RETURNS: Resource alignment if it is specified.
2389 * Zero if it is not specified.
2390 */
2391resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2392{
2393 int seg, bus, slot, func, align_order, count;
2394 resource_size_t align = 0;
2395 char *p;
2396
2397 spin_lock(&resource_alignment_lock);
2398 p = resource_alignment_param;
2399 while (*p) {
2400 count = 0;
2401 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2402 p[count] == '@') {
2403 p += count + 1;
2404 } else {
2405 align_order = -1;
2406 }
2407 if (sscanf(p, "%x:%x:%x.%x%n",
2408 &seg, &bus, &slot, &func, &count) != 4) {
2409 seg = 0;
2410 if (sscanf(p, "%x:%x.%x%n",
2411 &bus, &slot, &func, &count) != 3) {
2412 /* Invalid format */
2413 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2414 p);
2415 break;
2416 }
2417 }
2418 p += count;
2419 if (seg == pci_domain_nr(dev->bus) &&
2420 bus == dev->bus->number &&
2421 slot == PCI_SLOT(dev->devfn) &&
2422 func == PCI_FUNC(dev->devfn)) {
2423 if (align_order == -1) {
2424 align = PAGE_SIZE;
2425 } else {
2426 align = 1 << align_order;
2427 }
2428 /* Found */
2429 break;
2430 }
2431 if (*p != ';' && *p != ',') {
2432 /* End of param or invalid format */
2433 break;
2434 }
2435 p++;
2436 }
2437 spin_unlock(&resource_alignment_lock);
2438 return align;
2439}
2440
2441/**
2442 * pci_is_reassigndev - check if specified PCI is target device to reassign
2443 * @dev: the PCI device to check
2444 *
2445 * RETURNS: non-zero for PCI device is a target device to reassign,
2446 * or zero is not.
2447 */
2448int pci_is_reassigndev(struct pci_dev *dev)
2449{
2450 return (pci_specified_resource_alignment(dev) != 0);
2451}
2452
2453ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2454{
2455 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
2456 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
2457 spin_lock(&resource_alignment_lock);
2458 strncpy(resource_alignment_param, buf, count);
2459 resource_alignment_param[count] = '\0';
2460 spin_unlock(&resource_alignment_lock);
2461 return count;
2462}
2463
2464ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
2465{
2466 size_t count;
2467 spin_lock(&resource_alignment_lock);
2468 count = snprintf(buf, size, "%s", resource_alignment_param);
2469 spin_unlock(&resource_alignment_lock);
2470 return count;
2471}
2472
2473static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
2474{
2475 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
2476}
2477
2478static ssize_t pci_resource_alignment_store(struct bus_type *bus,
2479 const char *buf, size_t count)
2480{
2481 return pci_set_resource_alignment_param(buf, count);
2482}
2483
2484BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
2485 pci_resource_alignment_store);
2486
2487static int __init pci_resource_alignment_sysfs_init(void)
2488{
2489 return bus_create_file(&pci_bus_type,
2490 &bus_attr_resource_alignment);
2491}
2492
2493late_initcall(pci_resource_alignment_sysfs_init);
2494
2349static void __devinit pci_no_domains(void) 2495static void __devinit pci_no_domains(void)
2350{ 2496{
2351#ifdef CONFIG_PCI_DOMAINS 2497#ifdef CONFIG_PCI_DOMAINS
@@ -2394,6 +2540,9 @@ static int __init pci_setup(char *str)
2394 pci_cardbus_io_size = memparse(str + 9, &str); 2540 pci_cardbus_io_size = memparse(str + 9, &str);
2395 } else if (!strncmp(str, "cbmemsize=", 10)) { 2541 } else if (!strncmp(str, "cbmemsize=", 10)) {
2396 pci_cardbus_mem_size = memparse(str + 10, &str); 2542 pci_cardbus_mem_size = memparse(str + 10, &str);
2543 } else if (!strncmp(str, "resource_alignment=", 19)) {
2544 pci_set_resource_alignment_param(str + 19,
2545 strlen(str + 19));
2397 } else { 2546 } else {
2398 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2547 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2399 str); 2548 str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 149fff65891f..d03f6b99f292 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,8 @@
1#ifndef DRIVERS_PCI_H 1#ifndef DRIVERS_PCI_H
2#define DRIVERS_PCI_H 2#define DRIVERS_PCI_H
3 3
4#include <linux/workqueue.h>
5
4#define PCI_CFG_SPACE_SIZE 256 6#define PCI_CFG_SPACE_SIZE 256
5#define PCI_CFG_SPACE_EXP_SIZE 4096 7#define PCI_CFG_SPACE_EXP_SIZE 4096
6 8
@@ -135,6 +137,12 @@ extern int pcie_mch_quirk;
135extern struct device_attribute pci_dev_attrs[]; 137extern struct device_attribute pci_dev_attrs[];
136extern struct device_attribute dev_attr_cpuaffinity; 138extern struct device_attribute dev_attr_cpuaffinity;
137extern struct device_attribute dev_attr_cpulistaffinity; 139extern struct device_attribute dev_attr_cpulistaffinity;
140#ifdef CONFIG_HOTPLUG
141extern struct bus_attribute pci_bus_attrs[];
142#else
143#define pci_bus_attrs NULL
144#endif
145
138 146
139/** 147/**
140 * pci_match_one_device - Tell if a PCI device structure has a matching 148 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -177,6 +185,7 @@ enum pci_bar_type {
177 pci_bar_mem64, /* A 64-bit memory BAR */ 185 pci_bar_mem64, /* A 64-bit memory BAR */
178}; 186};
179 187
188extern int pci_setup_device(struct pci_dev *dev);
180extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 189extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
181 struct resource *res, unsigned int reg); 190 struct resource *res, unsigned int reg);
182extern int pci_resource_bar(struct pci_dev *dev, int resno, 191extern int pci_resource_bar(struct pci_dev *dev, int resno,
@@ -194,4 +203,60 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
194 return bus->self && bus->self->ari_enabled; 203 return bus->self && bus->self->ari_enabled;
195} 204}
196 205
206#ifdef CONFIG_PCI_QUIRKS
207extern int pci_is_reassigndev(struct pci_dev *dev);
208resource_size_t pci_specified_resource_alignment(struct pci_dev *dev);
209extern void pci_disable_bridge_window(struct pci_dev *dev);
210#endif
211
212/* Single Root I/O Virtualization */
213struct pci_sriov {
214 int pos; /* capability position */
215 int nres; /* number of resources */
216 u32 cap; /* SR-IOV Capabilities */
217 u16 ctrl; /* SR-IOV Control */
218 u16 total; /* total VFs associated with the PF */
219 u16 initial; /* initial VFs associated with the PF */
220 u16 nr_virtfn; /* number of VFs available */
221 u16 offset; /* first VF Routing ID offset */
222 u16 stride; /* following VF stride */
223 u32 pgsz; /* page size for BAR alignment */
224 u8 link; /* Function Dependency Link */
225 struct pci_dev *dev; /* lowest numbered PF */
226 struct pci_dev *self; /* this PF */
227 struct mutex lock; /* lock for VF bus */
228 struct work_struct mtask; /* VF Migration task */
229 u8 __iomem *mstate; /* VF Migration State Array */
230};
231
232#ifdef CONFIG_PCI_IOV
233extern int pci_iov_init(struct pci_dev *dev);
234extern void pci_iov_release(struct pci_dev *dev);
235extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
236 enum pci_bar_type *type);
237extern void pci_restore_iov_state(struct pci_dev *dev);
238extern int pci_iov_bus_range(struct pci_bus *bus);
239#else
240static inline int pci_iov_init(struct pci_dev *dev)
241{
242 return -ENODEV;
243}
244static inline void pci_iov_release(struct pci_dev *dev)
245
246{
247}
248static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno,
249 enum pci_bar_type *type)
250{
251 return 0;
252}
253static inline void pci_restore_iov_state(struct pci_dev *dev)
254{
255}
256static inline int pci_iov_bus_range(struct pci_bus *bus)
257{
258 return 0;
259}
260#endif /* CONFIG_PCI_IOV */
261
197#endif /* DRIVERS_PCI_H */ 262#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index e390707661dd..32ade5af927e 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,30 +38,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
38MODULE_DESCRIPTION(DRIVER_DESC); 38MODULE_DESCRIPTION(DRIVER_DESC);
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static int __devinit aer_probe (struct pcie_device *dev, 41static int __devinit aer_probe (struct pcie_device *dev);
42 const struct pcie_port_service_id *id );
43static void aer_remove(struct pcie_device *dev); 42static void aer_remove(struct pcie_device *dev);
44static int aer_suspend(struct pcie_device *dev, pm_message_t state)
45{return 0;}
46static int aer_resume(struct pcie_device *dev) {return 0;}
47static pci_ers_result_t aer_error_detected(struct pci_dev *dev, 43static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
48 enum pci_channel_state error); 44 enum pci_channel_state error);
49static void aer_error_resume(struct pci_dev *dev); 45static void aer_error_resume(struct pci_dev *dev);
50static pci_ers_result_t aer_root_reset(struct pci_dev *dev); 46static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
51 47
52/*
53 * PCI Express bus's AER Root service driver data structure
54 */
55static struct pcie_port_service_id aer_id[] = {
56 {
57 .vendor = PCI_ANY_ID,
58 .device = PCI_ANY_ID,
59 .port_type = PCIE_RC_PORT,
60 .service_type = PCIE_PORT_SERVICE_AER,
61 },
62 { /* end: all zeroes */ }
63};
64
65static struct pci_error_handlers aer_error_handlers = { 48static struct pci_error_handlers aer_error_handlers = {
66 .error_detected = aer_error_detected, 49 .error_detected = aer_error_detected,
67 .resume = aer_error_resume, 50 .resume = aer_error_resume,
@@ -69,14 +52,12 @@ static struct pci_error_handlers aer_error_handlers = {
69 52
70static struct pcie_port_service_driver aerdriver = { 53static struct pcie_port_service_driver aerdriver = {
71 .name = "aer", 54 .name = "aer",
72 .id_table = &aer_id[0], 55 .port_type = PCIE_ANY_PORT,
56 .service = PCIE_PORT_SERVICE_AER,
73 57
74 .probe = aer_probe, 58 .probe = aer_probe,
75 .remove = aer_remove, 59 .remove = aer_remove,
76 60
77 .suspend = aer_suspend,
78 .resume = aer_resume,
79
80 .err_handler = &aer_error_handlers, 61 .err_handler = &aer_error_handlers,
81 62
82 .reset_link = aer_root_reset, 63 .reset_link = aer_root_reset,
@@ -207,8 +188,7 @@ static void aer_remove(struct pcie_device *dev)
207 * 188 *
208 * Invoked when PCI Express bus loads AER service driver. 189 * Invoked when PCI Express bus loads AER service driver.
209 **/ 190 **/
210static int __devinit aer_probe (struct pcie_device *dev, 191static int __devinit aer_probe (struct pcie_device *dev)
211 const struct pcie_port_service_id *id )
212{ 192{
213 int status; 193 int status;
214 struct aer_rpc *rpc; 194 struct aer_rpc *rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index ebce26c37049..8edb2f300e8f 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -38,7 +38,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
38 38
39 handle = acpi_find_root_bridge_handle(pdev); 39 handle = acpi_find_root_bridge_handle(pdev);
40 if (handle) { 40 if (handle) {
41 status = pci_osc_control_set(handle, 41 status = acpi_pci_osc_control_set(handle,
42 OSC_PCI_EXPRESS_AER_CONTROL | 42 OSC_PCI_EXPRESS_AER_CONTROL |
43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
44 } 44 }
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 382575007382..307452f30035 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -351,21 +351,21 @@ static int find_aer_service_iter(struct device *device, void *data)
351{ 351{
352 struct device_driver *driver; 352 struct device_driver *driver;
353 struct pcie_port_service_driver *service_driver; 353 struct pcie_port_service_driver *service_driver;
354 struct pcie_device *pcie_dev;
355 struct find_aer_service_data *result; 354 struct find_aer_service_data *result;
356 355
357 result = (struct find_aer_service_data *) data; 356 result = (struct find_aer_service_data *) data;
358 357
359 if (device->bus == &pcie_port_bus_type) { 358 if (device->bus == &pcie_port_bus_type) {
360 pcie_dev = to_pcie_device(device); 359 struct pcie_port_data *port_data;
361 if (pcie_dev->id.port_type == PCIE_SW_DOWNSTREAM_PORT) 360
361 port_data = pci_get_drvdata(to_pcie_device(device)->port);
362 if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
362 result->is_downstream = 1; 363 result->is_downstream = 1;
363 364
364 driver = device->driver; 365 driver = device->driver;
365 if (driver) { 366 if (driver) {
366 service_driver = to_service_driver(driver); 367 service_driver = to_service_driver(driver);
367 if (service_driver->id_table->service_type == 368 if (service_driver->service == PCIE_PORT_SERVICE_AER) {
368 PCIE_PORT_SERVICE_AER) {
369 result->aer_driver = service_driver; 369 result->aer_driver = service_driver;
370 return 1; 370 return 1;
371 } 371 }
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2529f3f2ea5a..17ad53868f9f 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,19 +25,21 @@
25#define PCIE_CAPABILITIES_REG 0x2 25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14 26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4 27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
29/*
30 * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
31 * table entires used by port services must not exceed 31
32 */
33#define PCIE_PORT_MAX_MSIX_ENTRIES 32
28 34
29#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 35#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
30 36
31struct pcie_port_device_ext {
32 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
33};
34
35extern struct bus_type pcie_port_bus_type; 37extern struct bus_type pcie_port_bus_type;
36extern int pcie_port_device_probe(struct pci_dev *dev); 38extern int pcie_port_device_probe(struct pci_dev *dev);
37extern int pcie_port_device_register(struct pci_dev *dev); 39extern int pcie_port_device_register(struct pci_dev *dev);
38#ifdef CONFIG_PM 40#ifdef CONFIG_PM
39extern int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state); 41extern int pcie_port_device_suspend(struct device *dev);
40extern int pcie_port_device_resume(struct pci_dev *dev); 42extern int pcie_port_device_resume(struct device *dev);
41#endif 43#endif
42extern void pcie_port_device_remove(struct pci_dev *dev); 44extern void pcie_port_device_remove(struct pci_dev *dev);
43extern int __must_check pcie_port_bus_register(void); 45extern int __must_check pcie_port_bus_register(void);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index eec89b767f9f..ef3a4eeaebb4 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,20 +26,22 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) 26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
27{ 27{
28 struct pcie_device *pciedev; 28 struct pcie_device *pciedev;
29 struct pcie_port_data *port_data;
29 struct pcie_port_service_driver *driver; 30 struct pcie_port_service_driver *driver;
30 31
31 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) 32 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
32 return 0; 33 return 0;
33 34
34 pciedev = to_pcie_device(dev); 35 pciedev = to_pcie_device(dev);
35 driver = to_service_driver(drv); 36 driver = to_service_driver(drv);
36 if ( (driver->id_table->vendor != PCI_ANY_ID && 37
37 driver->id_table->vendor != pciedev->id.vendor) || 38 if (driver->service != pciedev->service)
38 (driver->id_table->device != PCI_ANY_ID && 39 return 0;
39 driver->id_table->device != pciedev->id.device) || 40
40 (driver->id_table->port_type != PCIE_ANY_PORT && 41 port_data = pci_get_drvdata(pciedev->port);
41 driver->id_table->port_type != pciedev->id.port_type) || 42
42 driver->id_table->service_type != pciedev->id.service_type ) 43 if (driver->port_type != PCIE_ANY_PORT
44 && driver->port_type != port_data->port_type)
43 return 0; 45 return 0;
44 46
45 return 1; 47 return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 8b3f8c18032f..e39982503863 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,10 +15,9 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
17 17
18#include "../pci.h"
18#include "portdrv.h" 19#include "portdrv.h"
19 20
20extern int pcie_mch_quirk; /* MSI-quirk Indicator */
21
22/** 21/**
23 * release_pcie_device - free PCI Express port service device structure 22 * release_pcie_device - free PCI Express port service device structure
24 * @dev: Port service device to release 23 * @dev: Port service device to release
@@ -31,26 +30,150 @@ static void release_pcie_device(struct device *dev)
31 kfree(to_pcie_device(dev)); 30 kfree(to_pcie_device(dev));
32} 31}
33 32
34static int is_msi_quirked(struct pci_dev *dev) 33/**
34 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
35 * @entries: Array of MSI-X entries
36 * @new_entry: Index of the entry to add to the array
37 * @nr_entries: Number of entries aleady in the array
38 *
39 * Return value: Position of the added entry in the array
40 */
41static int pcie_port_msix_add_entry(
42 struct msix_entry *entries, int new_entry, int nr_entries)
35{ 43{
36 int port_type, quirk = 0; 44 int j;
45
46 for (j = 0; j < nr_entries; j++)
47 if (entries[j].entry == new_entry)
48 return j;
49
50 entries[j].entry = new_entry;
51 return j;
52}
53
54/**
55 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
56 * @dev: PCI Express port to handle
57 * @vectors: Array of interrupt vectors to populate
58 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
59 *
60 * Return value: 0 on success, error code on failure
61 */
62static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
63{
64 struct msix_entry *msix_entries;
65 int idx[PCIE_PORT_DEVICE_MAXSERVICES];
66 int nr_entries, status, pos, i, nvec;
37 u16 reg16; 67 u16 reg16;
68 u32 reg32;
38 69
39 pci_read_config_word(dev, 70 nr_entries = pci_msix_table_size(dev);
40 pci_find_capability(dev, PCI_CAP_ID_EXP) + 71 if (!nr_entries)
41 PCIE_CAPABILITIES_REG, &reg16); 72 return -EINVAL;
42 port_type = (reg16 >> 4) & PORT_TYPE_MASK; 73 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
43 switch(port_type) { 74 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
44 case PCIE_RC_PORT: 75
45 if (pcie_mch_quirk == 1) 76 msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
46 quirk = 1; 77 if (!msix_entries)
47 break; 78 return -ENOMEM;
48 case PCIE_SW_UPSTREAM_PORT: 79
49 case PCIE_SW_DOWNSTREAM_PORT: 80 /*
50 default: 81 * Allocate as many entries as the port wants, so that we can check
51 break; 82 * which of them will be useful. Moreover, if nr_entries is correctly
83 * equal to the number of entries this port actually uses, we'll happily
84 * go through without any tricks.
85 */
86 for (i = 0; i < nr_entries; i++)
87 msix_entries[i].entry = i;
88
89 status = pci_enable_msix(dev, msix_entries, nr_entries);
90 if (status)
91 goto Exit;
92
93 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
94 idx[i] = -1;
95 status = -EIO;
96 nvec = 0;
97
98 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
99 int entry;
100
101 /*
102 * The code below follows the PCI Express Base Specification 2.0
103 * stating in Section 6.1.6 that "PME and Hot-Plug Event
104 * interrupts (when both are implemented) always share the same
105 * MSI or MSI-X vector, as indicated by the Interrupt Message
106 * Number field in the PCI Express Capabilities register", where
107 * according to Section 7.8.2 of the specification "For MSI-X,
108 * the value in this field indicates which MSI-X Table entry is
109 * used to generate the interrupt message."
110 */
111 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
112 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
113 entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
114 if (entry >= nr_entries)
115 goto Error;
116
117 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
118 if (i == nvec)
119 nvec++;
120
121 idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
122 idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
123 }
124
125 if (mask & PCIE_PORT_SERVICE_AER) {
126 int entry;
127
128 /*
129 * The code below follows Section 7.10.10 of the PCI Express
130 * Base Specification 2.0 stating that bits 31-27 of the Root
131 * Error Status Register contain a value indicating which of the
132 * MSI/MSI-X vectors assigned to the port is going to be used
133 * for AER, where "For MSI-X, the value in this register
134 * indicates which MSI-X Table entry is used to generate the
135 * interrupt message."
136 */
137 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
138 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
139 entry = reg32 >> 27;
140 if (entry >= nr_entries)
141 goto Error;
142
143 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
144 if (i == nvec)
145 nvec++;
146
147 idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
52 } 148 }
53 return quirk; 149
150 /*
151 * If nvec is equal to the allocated number of entries, we can just use
152 * what we have. Otherwise, the port has some extra entries not for the
153 * services we know and we need to work around that.
154 */
155 if (nvec == nr_entries) {
156 status = 0;
157 } else {
158 /* Drop the temporary MSI-X setup */
159 pci_disable_msix(dev);
160
161 /* Now allocate the MSI-X vectors for real */
162 status = pci_enable_msix(dev, msix_entries, nvec);
163 if (status)
164 goto Exit;
165 }
166
167 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
168 vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
169
170 Exit:
171 kfree(msix_entries);
172 return status;
173
174 Error:
175 pci_disable_msix(dev);
176 goto Exit;
54} 177}
55 178
56/** 179/**
@@ -64,47 +187,32 @@ static int is_msi_quirked(struct pci_dev *dev)
64 */ 187 */
65static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
66{ 189{
67 int i, pos, nvec, status = -EINVAL; 190 struct pcie_port_data *port_data = pci_get_drvdata(dev);
68 int interrupt_mode = PCIE_PORT_INTx_MODE; 191 int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
192 int i;
69 193
70 /* Set INTx as default */
71 for (i = 0, nvec = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
72 if (mask & (1 << i))
73 nvec++;
74 vectors[i] = dev->irq;
75 }
76
77 /* Check MSI quirk */ 194 /* Check MSI quirk */
78 if (is_msi_quirked(dev)) 195 if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
79 return interrupt_mode; 196 goto Fallback;
80 197
81 /* Select MSI-X over MSI if supported */ 198 /* Try to use MSI-X if supported */
82 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 199 if (!pcie_port_enable_msix(dev, vectors, mask))
83 if (pos) { 200 return PCIE_PORT_MSIX_MODE;
84 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 201
85 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 202 /* We're not going to use MSI-X, so try MSI and fall back to INTx */
86 status = pci_enable_msix(dev, msix_entries, nvec); 203 if (!pci_enable_msi(dev))
87 if (!status) { 204 interrupt_mode = PCIE_PORT_MSI_MODE;
88 int j = 0; 205
89 206 Fallback:
90 interrupt_mode = PCIE_PORT_MSIX_MODE; 207 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
91 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 208 interrupt_mode = PCIE_PORT_INTx_MODE;
92 if (mask & (1 << i)) 209
93 vectors[i] = msix_entries[j++].vector; 210 irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
94 } 211 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
95 } 212 vectors[i] = irq;
96 } 213
97 if (status) { 214 vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
98 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 215
99 if (pos) {
100 status = pci_enable_msi(dev);
101 if (!status) {
102 interrupt_mode = PCIE_PORT_MSI_MODE;
103 for (i = 0;i < PCIE_PORT_DEVICE_MAXSERVICES;i++)
104 vectors[i] = dev->irq;
105 }
106 }
107 }
108 return interrupt_mode; 216 return interrupt_mode;
109} 217}
110 218
@@ -132,13 +240,11 @@ static int get_port_device_capability(struct pci_dev *dev)
132 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32); 240 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
133 if (reg32 & SLOT_HP_CAPABLE_MASK) 241 if (reg32 & SLOT_HP_CAPABLE_MASK)
134 services |= PCIE_PORT_SERVICE_HP; 242 services |= PCIE_PORT_SERVICE_HP;
135 } 243 }
136 /* PME Capable - root port capability */ 244 /* AER capable */
137 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
138 services |= PCIE_PORT_SERVICE_PME;
139
140 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 245 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
141 services |= PCIE_PORT_SERVICE_AER; 246 services |= PCIE_PORT_SERVICE_AER;
247 /* VC support */
142 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 248 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
143 services |= PCIE_PORT_SERVICE_VC; 249 services |= PCIE_PORT_SERVICE_VC;
144 250
@@ -152,20 +258,17 @@ static int get_port_device_capability(struct pci_dev *dev)
152 * @port_type: Type of the port 258 * @port_type: Type of the port
153 * @service_type: Type of service to associate with the service device 259 * @service_type: Type of service to associate with the service device
154 * @irq: Interrupt vector to associate with the service device 260 * @irq: Interrupt vector to associate with the service device
155 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
156 */ 261 */
157static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 262static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
158 int port_type, int service_type, int irq, int irq_mode) 263 int service_type, int irq)
159{ 264{
265 struct pcie_port_data *port_data = pci_get_drvdata(parent);
160 struct device *device; 266 struct device *device;
267 int port_type = port_data->port_type;
161 268
162 dev->port = parent; 269 dev->port = parent;
163 dev->interrupt_mode = irq_mode;
164 dev->irq = irq; 270 dev->irq = irq;
165 dev->id.vendor = parent->vendor; 271 dev->service = service_type;
166 dev->id.device = parent->device;
167 dev->id.port_type = port_type;
168 dev->id.service_type = (1 << service_type);
169 272
170 /* Initialize generic device interface */ 273 /* Initialize generic device interface */
171 device = &dev->device; 274 device = &dev->device;
@@ -185,10 +288,9 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
185 * @port_type: Type of the port 288 * @port_type: Type of the port
186 * @service_type: Type of service to associate with the service device 289 * @service_type: Type of service to associate with the service device
187 * @irq: Interrupt vector to associate with the service device 290 * @irq: Interrupt vector to associate with the service device
188 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
189 */ 291 */
190static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, 292static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
191 int port_type, int service_type, int irq, int irq_mode) 293 int service_type, int irq)
192{ 294{
193 struct pcie_device *device; 295 struct pcie_device *device;
194 296
@@ -196,7 +298,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
196 if (!device) 298 if (!device)
197 return NULL; 299 return NULL;
198 300
199 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 301 pcie_device_init(parent, device, service_type, irq);
200 return device; 302 return device;
201} 303}
202 304
@@ -230,63 +332,90 @@ int pcie_port_device_probe(struct pci_dev *dev)
230 */ 332 */
231int pcie_port_device_register(struct pci_dev *dev) 333int pcie_port_device_register(struct pci_dev *dev)
232{ 334{
233 struct pcie_port_device_ext *p_ext; 335 struct pcie_port_data *port_data;
234 int status, type, capabilities, irq_mode, i; 336 int status, capabilities, irq_mode, i, nr_serv;
235 int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; 337 int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
236 u16 reg16; 338 u16 reg16;
237 339
238 /* Allocate port device extension */ 340 port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
239 if (!(p_ext = kmalloc(sizeof(struct pcie_port_device_ext), GFP_KERNEL))) 341 if (!port_data)
240 return -ENOMEM; 342 return -ENOMEM;
241 343 pci_set_drvdata(dev, port_data);
242 pci_set_drvdata(dev, p_ext);
243 344
244 /* Get port type */ 345 /* Get port type */
245 pci_read_config_word(dev, 346 pci_read_config_word(dev,
246 pci_find_capability(dev, PCI_CAP_ID_EXP) + 347 pci_find_capability(dev, PCI_CAP_ID_EXP) +
247 PCIE_CAPABILITIES_REG, &reg16); 348 PCIE_CAPABILITIES_REG, &reg16);
248 type = (reg16 >> 4) & PORT_TYPE_MASK; 349 port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
249 350
250 /* Now get port services */
251 capabilities = get_port_device_capability(dev); 351 capabilities = get_port_device_capability(dev);
352 /* Root ports are capable of generating PME too */
353 if (port_data->port_type == PCIE_RC_PORT)
354 capabilities |= PCIE_PORT_SERVICE_PME;
355
252 irq_mode = assign_interrupt_mode(dev, vectors, capabilities); 356 irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
253 p_ext->interrupt_mode = irq_mode; 357 if (irq_mode == PCIE_PORT_NO_IRQ) {
358 /*
359 * Don't use service devices that require interrupts if there is
360 * no way to generate them.
361 */
362 if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
363 status = -ENODEV;
364 goto Error;
365 }
366 capabilities = PCIE_PORT_SERVICE_VC;
367 }
368 port_data->port_irq_mode = irq_mode;
369
370 status = pci_enable_device(dev);
371 if (status)
372 goto Error;
373 pci_set_master(dev);
254 374
255 /* Allocate child services if any */ 375 /* Allocate child services if any */
256 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 376 for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
257 struct pcie_device *child; 377 struct pcie_device *child;
378 int service = 1 << i;
379
380 if (!(capabilities & service))
381 continue;
258 382
259 if (capabilities & (1 << i)) { 383 child = alloc_pcie_device(dev, service, vectors[i]);
260 child = alloc_pcie_device( 384 if (!child)
261 dev, /* parent */ 385 continue;
262 type, /* port type */ 386
263 i, /* service type */ 387 status = device_register(&child->device);
264 vectors[i], /* irq */ 388 if (status) {
265 irq_mode /* interrupt mode */); 389 kfree(child);
266 if (child) { 390 continue;
267 status = device_register(&child->device);
268 if (status) {
269 kfree(child);
270 continue;
271 }
272 get_device(&child->device);
273 }
274 } 391 }
392
393 get_device(&child->device);
394 nr_serv++;
395 }
396 if (!nr_serv) {
397 pci_disable_device(dev);
398 status = -ENODEV;
399 goto Error;
275 } 400 }
401
276 return 0; 402 return 0;
403
404 Error:
405 kfree(port_data);
406 return status;
277} 407}
278 408
279#ifdef CONFIG_PM 409#ifdef CONFIG_PM
280static int suspend_iter(struct device *dev, void *data) 410static int suspend_iter(struct device *dev, void *data)
281{ 411{
282 struct pcie_port_service_driver *service_driver; 412 struct pcie_port_service_driver *service_driver;
283 pm_message_t state = * (pm_message_t *) data;
284 413
285 if ((dev->bus == &pcie_port_bus_type) && 414 if ((dev->bus == &pcie_port_bus_type) &&
286 (dev->driver)) { 415 (dev->driver)) {
287 service_driver = to_service_driver(dev->driver); 416 service_driver = to_service_driver(dev->driver);
288 if (service_driver->suspend) 417 if (service_driver->suspend)
289 service_driver->suspend(to_pcie_device(dev), state); 418 service_driver->suspend(to_pcie_device(dev));
290 } 419 }
291 return 0; 420 return 0;
292} 421}
@@ -294,11 +423,10 @@ static int suspend_iter(struct device *dev, void *data)
294/** 423/**
295 * pcie_port_device_suspend - suspend port services associated with a PCIe port 424 * pcie_port_device_suspend - suspend port services associated with a PCIe port
296 * @dev: PCI Express port to handle 425 * @dev: PCI Express port to handle
297 * @state: Representation of system power management transition in progress
298 */ 426 */
299int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) 427int pcie_port_device_suspend(struct device *dev)
300{ 428{
301 return device_for_each_child(&dev->dev, &state, suspend_iter); 429 return device_for_each_child(dev, NULL, suspend_iter);
302} 430}
303 431
304static int resume_iter(struct device *dev, void *data) 432static int resume_iter(struct device *dev, void *data)
@@ -318,24 +446,17 @@ static int resume_iter(struct device *dev, void *data)
318 * pcie_port_device_suspend - resume port services associated with a PCIe port 446 * pcie_port_device_suspend - resume port services associated with a PCIe port
319 * @dev: PCI Express port to handle 447 * @dev: PCI Express port to handle
320 */ 448 */
321int pcie_port_device_resume(struct pci_dev *dev) 449int pcie_port_device_resume(struct device *dev)
322{ 450{
323 return device_for_each_child(&dev->dev, NULL, resume_iter); 451 return device_for_each_child(dev, NULL, resume_iter);
324} 452}
325#endif 453#endif /* PM */
326 454
327static int remove_iter(struct device *dev, void *data) 455static int remove_iter(struct device *dev, void *data)
328{ 456{
329 struct pcie_port_service_driver *service_driver;
330
331 if (dev->bus == &pcie_port_bus_type) { 457 if (dev->bus == &pcie_port_bus_type) {
332 if (dev->driver) { 458 put_device(dev);
333 service_driver = to_service_driver(dev->driver); 459 device_unregister(dev);
334 if (service_driver->remove)
335 service_driver->remove(to_pcie_device(dev));
336 }
337 *(unsigned long*)data = (unsigned long)dev;
338 return 1;
339 } 460 }
340 return 0; 461 return 0;
341} 462}
@@ -349,25 +470,21 @@ static int remove_iter(struct device *dev, void *data)
349 */ 470 */
350void pcie_port_device_remove(struct pci_dev *dev) 471void pcie_port_device_remove(struct pci_dev *dev)
351{ 472{
352 struct device *device; 473 struct pcie_port_data *port_data = pci_get_drvdata(dev);
353 unsigned long device_addr;
354 int interrupt_mode = PCIE_PORT_INTx_MODE;
355 int status;
356 474
357 do { 475 device_for_each_child(&dev->dev, NULL, remove_iter);
358 status = device_for_each_child(&dev->dev, &device_addr, remove_iter); 476 pci_disable_device(dev);
359 if (status) { 477
360 device = (struct device*)device_addr; 478 switch (port_data->port_irq_mode) {
361 interrupt_mode = (to_pcie_device(device))->interrupt_mode; 479 case PCIE_PORT_MSIX_MODE:
362 put_device(device);
363 device_unregister(device);
364 }
365 } while (status);
366 /* Switch to INTx by default if MSI enabled */
367 if (interrupt_mode == PCIE_PORT_MSIX_MODE)
368 pci_disable_msix(dev); 480 pci_disable_msix(dev);
369 else if (interrupt_mode == PCIE_PORT_MSI_MODE) 481 break;
482 case PCIE_PORT_MSI_MODE:
370 pci_disable_msi(dev); 483 pci_disable_msi(dev);
484 break;
485 }
486
487 kfree(port_data);
371} 488}
372 489
373/** 490/**
@@ -392,7 +509,7 @@ static int pcie_port_probe_service(struct device *dev)
392 return -ENODEV; 509 return -ENODEV;
393 510
394 pciedev = to_pcie_device(dev); 511 pciedev = to_pcie_device(dev);
395 status = driver->probe(pciedev, driver->id_table); 512 status = driver->probe(pciedev);
396 if (!status) { 513 if (!status) {
397 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 514 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
398 driver->name); 515 driver->name);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 5ea566e20b37..b924e2463f85 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -32,11 +32,6 @@ MODULE_LICENSE("GPL");
32/* global data */ 32/* global data */
33static const char device_name[] = "pcieport-driver"; 33static const char device_name[] = "pcieport-driver";
34 34
35static int pcie_portdrv_save_config(struct pci_dev *dev)
36{
37 return pci_save_state(dev);
38}
39
40static int pcie_portdrv_restore_config(struct pci_dev *dev) 35static int pcie_portdrv_restore_config(struct pci_dev *dev)
41{ 36{
42 int retval; 37 int retval;
@@ -49,21 +44,21 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
49} 44}
50 45
51#ifdef CONFIG_PM 46#ifdef CONFIG_PM
52static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) 47static struct dev_pm_ops pcie_portdrv_pm_ops = {
53{ 48 .suspend = pcie_port_device_suspend,
54 return pcie_port_device_suspend(dev, state); 49 .resume = pcie_port_device_resume,
50 .freeze = pcie_port_device_suspend,
51 .thaw = pcie_port_device_resume,
52 .poweroff = pcie_port_device_suspend,
53 .restore = pcie_port_device_resume,
54};
55 55
56} 56#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
57 57
58static int pcie_portdrv_resume(struct pci_dev *dev) 58#else /* !PM */
59{ 59
60 pci_set_master(dev); 60#define PCIE_PORTDRV_PM_OPS NULL
61 return pcie_port_device_resume(dev); 61#endif /* !PM */
62}
63#else
64#define pcie_portdrv_suspend NULL
65#define pcie_portdrv_resume NULL
66#endif
67 62
68/* 63/*
69 * pcie_portdrv_probe - Probe PCI-Express port devices 64 * pcie_portdrv_probe - Probe PCI-Express port devices
@@ -82,20 +77,15 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
82 if (status) 77 if (status)
83 return status; 78 return status;
84 79
85 if (pci_enable_device(dev) < 0)
86 return -ENODEV;
87
88 pci_set_master(dev);
89 if (!dev->irq && dev->pin) { 80 if (!dev->irq && dev->pin) {
90 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 81 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
91 "check vendor BIOS\n", dev->vendor, dev->device); 82 "check vendor BIOS\n", dev->vendor, dev->device);
92 } 83 }
93 if (pcie_port_device_register(dev)) { 84 status = pcie_port_device_register(dev);
94 pci_disable_device(dev); 85 if (status)
95 return -ENOMEM; 86 return status;
96 }
97 87
98 pcie_portdrv_save_config(dev); 88 pci_save_state(dev);
99 89
100 return 0; 90 return 0;
101} 91}
@@ -104,7 +94,6 @@ static void pcie_portdrv_remove (struct pci_dev *dev)
104{ 94{
105 pcie_port_device_remove(dev); 95 pcie_port_device_remove(dev);
106 pci_disable_device(dev); 96 pci_disable_device(dev);
107 kfree(pci_get_drvdata(dev));
108} 97}
109 98
110static int error_detected_iter(struct device *device, void *data) 99static int error_detected_iter(struct device *device, void *data)
@@ -278,10 +267,9 @@ static struct pci_driver pcie_portdriver = {
278 .probe = pcie_portdrv_probe, 267 .probe = pcie_portdrv_probe,
279 .remove = pcie_portdrv_remove, 268 .remove = pcie_portdrv_remove,
280 269
281 .suspend = pcie_portdrv_suspend,
282 .resume = pcie_portdrv_resume,
283
284 .err_handler = &pcie_portdrv_err_handler, 270 .err_handler = &pcie_portdrv_err_handler,
271
272 .driver.pm = PCIE_PORTDRV_PM_OPS,
285}; 273};
286 274
287static int __init pcie_portdrv_init(void) 275static int __init pcie_portdrv_init(void)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 55ec44a27e89..e2f3dd098cfa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -287,7 +287,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
287 struct resource *res; 287 struct resource *res;
288 int i; 288 int i;
289 289
290 if (!dev) /* It's a host bus, nothing to read */ 290 if (!child->parent) /* It's a host bus, nothing to read */
291 return; 291 return;
292 292
293 if (dev->transparent) { 293 if (dev->transparent) {
@@ -511,21 +511,21 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
511 511
512 /* 512 /*
513 * If we already got to this bus through a different bridge, 513 * If we already got to this bus through a different bridge,
514 * ignore it. This can happen with the i450NX chipset. 514 * don't re-add it. This can happen with the i450NX chipset.
515 *
516 * However, we continue to descend down the hierarchy and
517 * scan remaining child buses.
515 */ 518 */
516 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 519 child = pci_find_bus(pci_domain_nr(bus), busnr);
517 dev_info(&dev->dev, "bus %04x:%02x already known\n", 520 if (!child) {
518 pci_domain_nr(bus), busnr); 521 child = pci_add_new_bus(bus, dev, busnr);
519 goto out; 522 if (!child)
523 goto out;
524 child->primary = buses & 0xFF;
525 child->subordinate = (buses >> 16) & 0xFF;
526 child->bridge_ctl = bctl;
520 } 527 }
521 528
522 child = pci_add_new_bus(bus, dev, busnr);
523 if (!child)
524 goto out;
525 child->primary = buses & 0xFF;
526 child->subordinate = (buses >> 16) & 0xFF;
527 child->bridge_ctl = bctl;
528
529 cmax = pci_scan_child_bus(child); 529 cmax = pci_scan_child_bus(child);
530 if (cmax > max) 530 if (cmax > max)
531 max = cmax; 531 max = cmax;
@@ -674,6 +674,19 @@ static void pci_read_irq(struct pci_dev *dev)
674 dev->irq = irq; 674 dev->irq = irq;
675} 675}
676 676
677static void set_pcie_port_type(struct pci_dev *pdev)
678{
679 int pos;
680 u16 reg16;
681
682 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
683 if (!pos)
684 return;
685 pdev->is_pcie = 1;
686 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
687 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
688}
689
677#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 690#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
678 691
679/** 692/**
@@ -683,12 +696,33 @@ static void pci_read_irq(struct pci_dev *dev)
683 * Initialize the device structure with information about the device's 696 * Initialize the device structure with information about the device's
684 * vendor,class,memory and IO-space addresses,IRQ lines etc. 697 * vendor,class,memory and IO-space addresses,IRQ lines etc.
685 * Called at initialisation of the PCI subsystem and by CardBus services. 698 * Called at initialisation of the PCI subsystem and by CardBus services.
686 * Returns 0 on success and -1 if unknown type of device (not normal, bridge 699 * Returns 0 on success and negative if unknown type of device (not normal,
687 * or CardBus). 700 * bridge or CardBus).
688 */ 701 */
689static int pci_setup_device(struct pci_dev * dev) 702int pci_setup_device(struct pci_dev *dev)
690{ 703{
691 u32 class; 704 u32 class;
705 u8 hdr_type;
706 struct pci_slot *slot;
707
708 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
709 return -EIO;
710
711 dev->sysdata = dev->bus->sysdata;
712 dev->dev.parent = dev->bus->bridge;
713 dev->dev.bus = &pci_bus_type;
714 dev->hdr_type = hdr_type & 0x7f;
715 dev->multifunction = !!(hdr_type & 0x80);
716 dev->error_state = pci_channel_io_normal;
717 set_pcie_port_type(dev);
718
719 list_for_each_entry(slot, &dev->bus->slots, list)
720 if (PCI_SLOT(dev->devfn) == slot->number)
721 dev->slot = slot;
722
723 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
724 set this higher, assuming the system even supports it. */
725 dev->dma_mask = 0xffffffff;
692 726
693 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 727 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
694 dev->bus->number, PCI_SLOT(dev->devfn), 728 dev->bus->number, PCI_SLOT(dev->devfn),
@@ -703,12 +737,14 @@ static int pci_setup_device(struct pci_dev * dev)
703 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", 737 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
704 dev->vendor, dev->device, class, dev->hdr_type); 738 dev->vendor, dev->device, class, dev->hdr_type);
705 739
740 /* need to have dev->class ready */
741 dev->cfg_size = pci_cfg_space_size(dev);
742
706 /* "Unknown power state" */ 743 /* "Unknown power state" */
707 dev->current_state = PCI_UNKNOWN; 744 dev->current_state = PCI_UNKNOWN;
708 745
709 /* Early fixups, before probing the BARs */ 746 /* Early fixups, before probing the BARs */
710 pci_fixup_device(pci_fixup_early, dev); 747 pci_fixup_device(pci_fixup_early, dev);
711 class = dev->class >> 8;
712 748
713 switch (dev->hdr_type) { /* header type */ 749 switch (dev->hdr_type) { /* header type */
714 case PCI_HEADER_TYPE_NORMAL: /* standard header */ 750 case PCI_HEADER_TYPE_NORMAL: /* standard header */
@@ -770,7 +806,7 @@ static int pci_setup_device(struct pci_dev * dev)
770 default: /* unknown header */ 806 default: /* unknown header */
771 dev_err(&dev->dev, "unknown header type %02x, " 807 dev_err(&dev->dev, "unknown header type %02x, "
772 "ignoring device\n", dev->hdr_type); 808 "ignoring device\n", dev->hdr_type);
773 return -1; 809 return -EIO;
774 810
775 bad: 811 bad:
776 dev_err(&dev->dev, "ignoring class %02x (doesn't match header " 812 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
@@ -785,6 +821,7 @@ static int pci_setup_device(struct pci_dev * dev)
785static void pci_release_capabilities(struct pci_dev *dev) 821static void pci_release_capabilities(struct pci_dev *dev)
786{ 822{
787 pci_vpd_release(dev); 823 pci_vpd_release(dev);
824 pci_iov_release(dev);
788} 825}
789 826
790/** 827/**
@@ -803,19 +840,6 @@ static void pci_release_dev(struct device *dev)
803 kfree(pci_dev); 840 kfree(pci_dev);
804} 841}
805 842
806static void set_pcie_port_type(struct pci_dev *pdev)
807{
808 int pos;
809 u16 reg16;
810
811 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
812 if (!pos)
813 return;
814 pdev->is_pcie = 1;
815 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
816 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
817}
818
819/** 843/**
820 * pci_cfg_space_size - get the configuration space size of the PCI device. 844 * pci_cfg_space_size - get the configuration space size of the PCI device.
821 * @dev: PCI device 845 * @dev: PCI device
@@ -847,6 +871,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
847{ 871{
848 int pos; 872 int pos;
849 u32 status; 873 u32 status;
874 u16 class;
875
876 class = dev->class >> 8;
877 if (class == PCI_CLASS_BRIDGE_HOST)
878 return pci_cfg_space_size_ext(dev);
850 879
851 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 880 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
852 if (!pos) { 881 if (!pos) {
@@ -891,9 +920,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
891static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 920static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
892{ 921{
893 struct pci_dev *dev; 922 struct pci_dev *dev;
894 struct pci_slot *slot;
895 u32 l; 923 u32 l;
896 u8 hdr_type;
897 int delay = 1; 924 int delay = 1;
898 925
899 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) 926 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
@@ -920,34 +947,16 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
920 } 947 }
921 } 948 }
922 949
923 if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
924 return NULL;
925
926 dev = alloc_pci_dev(); 950 dev = alloc_pci_dev();
927 if (!dev) 951 if (!dev)
928 return NULL; 952 return NULL;
929 953
930 dev->bus = bus; 954 dev->bus = bus;
931 dev->sysdata = bus->sysdata;
932 dev->dev.parent = bus->bridge;
933 dev->dev.bus = &pci_bus_type;
934 dev->devfn = devfn; 955 dev->devfn = devfn;
935 dev->hdr_type = hdr_type & 0x7f;
936 dev->multifunction = !!(hdr_type & 0x80);
937 dev->vendor = l & 0xffff; 956 dev->vendor = l & 0xffff;
938 dev->device = (l >> 16) & 0xffff; 957 dev->device = (l >> 16) & 0xffff;
939 dev->cfg_size = pci_cfg_space_size(dev);
940 dev->error_state = pci_channel_io_normal;
941 set_pcie_port_type(dev);
942
943 list_for_each_entry(slot, &bus->slots, list)
944 if (PCI_SLOT(devfn) == slot->number)
945 dev->slot = slot;
946 958
947 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 959 if (pci_setup_device(dev)) {
948 set this higher, assuming the system even supports it. */
949 dev->dma_mask = 0xffffffff;
950 if (pci_setup_device(dev) < 0) {
951 kfree(dev); 960 kfree(dev);
952 return NULL; 961 return NULL;
953 } 962 }
@@ -972,6 +981,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
972 981
973 /* Alternative Routing-ID Forwarding */ 982 /* Alternative Routing-ID Forwarding */
974 pci_enable_ari(dev); 983 pci_enable_ari(dev);
984
985 /* Single Root I/O Virtualization */
986 pci_iov_init(dev);
975} 987}
976 988
977void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 989void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1006,6 +1018,12 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1006{ 1018{
1007 struct pci_dev *dev; 1019 struct pci_dev *dev;
1008 1020
1021 dev = pci_get_slot(bus, devfn);
1022 if (dev) {
1023 pci_dev_put(dev);
1024 return dev;
1025 }
1026
1009 dev = pci_scan_device(bus, devfn); 1027 dev = pci_scan_device(bus, devfn);
1010 if (!dev) 1028 if (!dev)
1011 return NULL; 1029 return NULL;
@@ -1024,35 +1042,27 @@ EXPORT_SYMBOL(pci_scan_single_device);
1024 * Scan a PCI slot on the specified PCI bus for devices, adding 1042 * Scan a PCI slot on the specified PCI bus for devices, adding
1025 * discovered devices to the @bus->devices list. New devices 1043 * discovered devices to the @bus->devices list. New devices
1026 * will not have is_added set. 1044 * will not have is_added set.
1045 *
1046 * Returns the number of new devices found.
1027 */ 1047 */
1028int pci_scan_slot(struct pci_bus *bus, int devfn) 1048int pci_scan_slot(struct pci_bus *bus, int devfn)
1029{ 1049{
1030 int func, nr = 0; 1050 int fn, nr = 0;
1031 int scan_all_fns; 1051 struct pci_dev *dev;
1032
1033 scan_all_fns = pcibios_scan_all_fns(bus, devfn);
1034
1035 for (func = 0; func < 8; func++, devfn++) {
1036 struct pci_dev *dev;
1037
1038 dev = pci_scan_single_device(bus, devfn);
1039 if (dev) {
1040 nr++;
1041 1052
1042 /* 1053 dev = pci_scan_single_device(bus, devfn);
1043 * If this is a single function device, 1054 if (dev && !dev->is_added) /* new device? */
1044 * don't scan past the first function. 1055 nr++;
1045 */ 1056
1046 if (!dev->multifunction) { 1057 if ((dev && dev->multifunction) ||
1047 if (func > 0) { 1058 (!dev && pcibios_scan_all_fns(bus, devfn))) {
1048 dev->multifunction = 1; 1059 for (fn = 1; fn < 8; fn++) {
1049 } else { 1060 dev = pci_scan_single_device(bus, devfn + fn);
1050 break; 1061 if (dev) {
1051 } 1062 if (!dev->is_added)
1063 nr++;
1064 dev->multifunction = 1;
1052 } 1065 }
1053 } else {
1054 if (func == 0 && !scan_all_fns)
1055 break;
1056 } 1066 }
1057 } 1067 }
1058 1068
@@ -1074,12 +1084,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1074 for (devfn = 0; devfn < 0x100; devfn += 8) 1084 for (devfn = 0; devfn < 0x100; devfn += 8)
1075 pci_scan_slot(bus, devfn); 1085 pci_scan_slot(bus, devfn);
1076 1086
1087 /* Reserve buses for SR-IOV capability. */
1088 max += pci_iov_bus_range(bus);
1089
1077 /* 1090 /*
1078 * After performing arch-dependent fixup of the bus, look behind 1091 * After performing arch-dependent fixup of the bus, look behind
1079 * all PCI-to-PCI bridges on this bus. 1092 * all PCI-to-PCI bridges on this bus.
1080 */ 1093 */
1081 pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number); 1094 if (!bus->is_added) {
1082 pcibios_fixup_bus(bus); 1095 pr_debug("PCI: Fixups for bus %04x:%02x\n",
1096 pci_domain_nr(bus), bus->number);
1097 pcibios_fixup_bus(bus);
1098 if (pci_is_root_bus(bus))
1099 bus->is_added = 1;
1100 }
1101
1083 for (pass=0; pass < 2; pass++) 1102 for (pass=0; pass < 2; pass++)
1084 list_for_each_entry(dev, &bus->devices, bus_list) { 1103 list_for_each_entry(dev, &bus->devices, bus_list) {
1085 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 1104 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
@@ -1114,7 +1133,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
1114 if (!b) 1133 if (!b)
1115 return NULL; 1134 return NULL;
1116 1135
1117 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 1136 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1118 if (!dev){ 1137 if (!dev){
1119 kfree(b); 1138 kfree(b);
1120 return NULL; 1139 return NULL;
@@ -1133,7 +1152,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
1133 list_add_tail(&b->node, &pci_root_buses); 1152 list_add_tail(&b->node, &pci_root_buses);
1134 up_write(&pci_bus_sem); 1153 up_write(&pci_bus_sem);
1135 1154
1136 memset(dev, 0, sizeof(*dev));
1137 dev->parent = parent; 1155 dev->parent = parent;
1138 dev->release = pci_release_bus_bridge_dev; 1156 dev->release = pci_release_bus_bridge_dev;
1139 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); 1157 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
@@ -1193,6 +1211,38 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1193EXPORT_SYMBOL(pci_scan_bus_parented); 1211EXPORT_SYMBOL(pci_scan_bus_parented);
1194 1212
1195#ifdef CONFIG_HOTPLUG 1213#ifdef CONFIG_HOTPLUG
1214/**
1215 * pci_rescan_bus - scan a PCI bus for devices.
1216 * @bus: PCI bus to scan
1217 *
1218 * Scan a PCI bus and child buses for new devices, adds them,
1219 * and enables them.
1220 *
1221 * Returns the max number of subordinate bus discovered.
1222 */
1223unsigned int __devinit pci_rescan_bus(struct pci_bus *bus)
1224{
1225 unsigned int max;
1226 struct pci_dev *dev;
1227
1228 max = pci_scan_child_bus(bus);
1229
1230 down_read(&pci_bus_sem);
1231 list_for_each_entry(dev, &bus->devices, bus_list)
1232 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1233 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1234 if (dev->subordinate)
1235 pci_bus_size_bridges(dev->subordinate);
1236 up_read(&pci_bus_sem);
1237
1238 pci_bus_assign_resources(bus);
1239 pci_enable_bridges(bus);
1240 pci_bus_add_devices(bus);
1241
1242 return max;
1243}
1244EXPORT_SYMBOL_GPL(pci_rescan_bus);
1245
1196EXPORT_SYMBOL(pci_add_new_bus); 1246EXPORT_SYMBOL(pci_add_new_bus);
1197EXPORT_SYMBOL(pci_scan_slot); 1247EXPORT_SYMBOL(pci_scan_slot);
1198EXPORT_SYMBOL(pci_scan_bridge); 1248EXPORT_SYMBOL(pci_scan_bridge);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 92b9efe9bcaf..9b2f0d96900d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/dmi.h> 25#include <linux/dmi.h>
26#include <linux/pci-aspm.h> 26#include <linux/pci-aspm.h>
27#include <linux/ioport.h>
27#include "pci.h" 28#include "pci.h"
28 29
29int isa_dma_bridge_buggy; 30int isa_dma_bridge_buggy;
@@ -34,6 +35,65 @@ int pcie_mch_quirk;
34EXPORT_SYMBOL(pcie_mch_quirk); 35EXPORT_SYMBOL(pcie_mch_quirk);
35 36
36#ifdef CONFIG_PCI_QUIRKS 37#ifdef CONFIG_PCI_QUIRKS
38/*
39 * This quirk function disables the device and releases resources
40 * which is specified by kernel's boot parameter 'pci=resource_alignment='.
41 * It also rounds up size to specified alignment.
42 * Later on, the kernel will assign page-aligned memory resource back
43 * to that device.
44 */
45static void __devinit quirk_resource_alignment(struct pci_dev *dev)
46{
47 int i;
48 struct resource *r;
49 resource_size_t align, size;
50
51 if (!pci_is_reassigndev(dev))
52 return;
53
54 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
55 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
56 dev_warn(&dev->dev,
57 "Can't reassign resources to host bridge.\n");
58 return;
59 }
60
61 dev_info(&dev->dev, "Disabling device and release resources.\n");
62 pci_disable_device(dev);
63
64 align = pci_specified_resource_alignment(dev);
65 for (i=0; i < PCI_BRIDGE_RESOURCES; i++) {
66 r = &dev->resource[i];
67 if (!(r->flags & IORESOURCE_MEM))
68 continue;
69 size = resource_size(r);
70 if (size < align) {
71 size = align;
72 dev_info(&dev->dev,
73 "Rounding up size of resource #%d to %#llx.\n",
74 i, (unsigned long long)size);
75 }
76 r->end = size - 1;
77 r->start = 0;
78 }
79 /* Need to disable bridge's resource window,
80 * to enable the kernel to reassign new resource
81 * window later on.
82 */
83 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
84 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
85 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
86 r = &dev->resource[i];
87 if (!(r->flags & IORESOURCE_MEM))
88 continue;
89 r->end = resource_size(r) - 1;
90 r->start = 0;
91 }
92 pci_disable_bridge_window(dev);
93 }
94}
95DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
96
37/* The Mellanox Tavor device gives false positive parity errors 97/* The Mellanox Tavor device gives false positive parity errors
38 * Mark this device with a broken_parity_status, to allow 98 * Mark this device with a broken_parity_status, to allow
39 * PCI scanning code to "skip" this now blacklisted device. 99 * PCI scanning code to "skip" this now blacklisted device.
@@ -1126,10 +1186,15 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1126 * its on-board VGA controller */ 1186 * its on-board VGA controller */
1127 asus_hides_smbus = 1; 1187 asus_hides_smbus = 1;
1128 } 1188 }
1129 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG) 1189 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1130 switch(dev->subsystem_device) { 1190 switch(dev->subsystem_device) {
1131 case 0x00b8: /* Compaq Evo D510 CMT */ 1191 case 0x00b8: /* Compaq Evo D510 CMT */
1132 case 0x00b9: /* Compaq Evo D510 SFF */ 1192 case 0x00b9: /* Compaq Evo D510 SFF */
1193 /* Motherboard doesn't have Host bridge
1194 * subvendor/subdevice IDs and on-board VGA
1195 * controller is disabled if an AGP card is
1196 * inserted, therefore checking USB UHCI
1197 * Controller #1 */
1133 asus_hides_smbus = 1; 1198 asus_hides_smbus = 1;
1134 } 1199 }
1135 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) 1200 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
@@ -1154,7 +1219,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
1154DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); 1219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1155 1220
1156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); 1221DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1157DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge); 1222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
1158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); 1223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1159 1224
1160static void asus_hides_smbus_lpc(struct pci_dev *dev) 1225static void asus_hides_smbus_lpc(struct pci_dev *dev)
@@ -1664,9 +1729,13 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1664 * of parallel ports and <S> is the number of serial ports. 1729 * of parallel ports and <S> is the number of serial ports.
1665 */ 1730 */
1666 switch (dev->device) { 1731 switch (dev->device) {
1732 case PCI_DEVICE_ID_NETMOS_9835:
1733 /* Well, this rule doesn't hold for the following 9835 device */
1734 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
1735 dev->subsystem_device == 0x0299)
1736 return;
1667 case PCI_DEVICE_ID_NETMOS_9735: 1737 case PCI_DEVICE_ID_NETMOS_9735:
1668 case PCI_DEVICE_ID_NETMOS_9745: 1738 case PCI_DEVICE_ID_NETMOS_9745:
1669 case PCI_DEVICE_ID_NETMOS_9835:
1670 case PCI_DEVICE_ID_NETMOS_9845: 1739 case PCI_DEVICE_ID_NETMOS_9845:
1671 case PCI_DEVICE_ID_NETMOS_9855: 1740 case PCI_DEVICE_ID_NETMOS_9855:
1672 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && 1741 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
@@ -2078,6 +2147,92 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2078 PCI_DEVICE_ID_NVIDIA_NVENET_15, 2147 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2079 nvenet_msi_disable); 2148 nvenet_msi_disable);
2080 2149
2150static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
2151{
2152 int pos, ttl = 48;
2153 int found = 0;
2154
2155 /* check if there is HT MSI cap or enabled on this device */
2156 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2157 while (pos && ttl--) {
2158 u8 flags;
2159
2160 if (found < 1)
2161 found = 1;
2162 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2163 &flags) == 0) {
2164 if (flags & HT_MSI_FLAGS_ENABLE) {
2165 if (found < 2) {
2166 found = 2;
2167 break;
2168 }
2169 }
2170 }
2171 pos = pci_find_next_ht_capability(dev, pos,
2172 HT_CAPTYPE_MSI_MAPPING);
2173 }
2174
2175 return found;
2176}
2177
2178static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
2179{
2180 struct pci_dev *dev;
2181 int pos;
2182 int i, dev_no;
2183 int found = 0;
2184
2185 dev_no = host_bridge->devfn >> 3;
2186 for (i = dev_no + 1; i < 0x20; i++) {
2187 dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2188 if (!dev)
2189 continue;
2190
2191 /* found next host bridge ?*/
2192 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2193 if (pos != 0) {
2194 pci_dev_put(dev);
2195 break;
2196 }
2197
2198 if (ht_check_msi_mapping(dev)) {
2199 found = 1;
2200 pci_dev_put(dev);
2201 break;
2202 }
2203 pci_dev_put(dev);
2204 }
2205
2206 return found;
2207}
2208
2209#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */
2210#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */
2211
2212static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
2213{
2214 int pos, ctrl_off;
2215 int end = 0;
2216 u16 flags, ctrl;
2217
2218 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2219
2220 if (!pos)
2221 goto out;
2222
2223 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2224
2225 ctrl_off = ((flags >> 10) & 1) ?
2226 PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2227 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2228
2229 if (ctrl & (1 << 6))
2230 end = 1;
2231
2232out:
2233 return end;
2234}
2235
2081static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev) 2236static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2082{ 2237{
2083 struct pci_dev *host_bridge; 2238 struct pci_dev *host_bridge;
@@ -2102,6 +2257,11 @@ static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2102 if (!found) 2257 if (!found)
2103 return; 2258 return;
2104 2259
2260 /* don't enable end_device/host_bridge with leaf directly here */
2261 if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
2262 host_bridge_with_leaf(host_bridge))
2263 goto out;
2264
2105 /* root did that ! */ 2265 /* root did that ! */
2106 if (msi_ht_cap_enabled(host_bridge)) 2266 if (msi_ht_cap_enabled(host_bridge))
2107 goto out; 2267 goto out;
@@ -2132,44 +2292,12 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
2132 } 2292 }
2133} 2293}
2134 2294
2135static int __devinit ht_check_msi_mapping(struct pci_dev *dev) 2295static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2136{
2137 int pos, ttl = 48;
2138 int found = 0;
2139
2140 /* check if there is HT MSI cap or enabled on this device */
2141 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2142 while (pos && ttl--) {
2143 u8 flags;
2144
2145 if (found < 1)
2146 found = 1;
2147 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2148 &flags) == 0) {
2149 if (flags & HT_MSI_FLAGS_ENABLE) {
2150 if (found < 2) {
2151 found = 2;
2152 break;
2153 }
2154 }
2155 }
2156 pos = pci_find_next_ht_capability(dev, pos,
2157 HT_CAPTYPE_MSI_MAPPING);
2158 }
2159
2160 return found;
2161}
2162
2163static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2164{ 2296{
2165 struct pci_dev *host_bridge; 2297 struct pci_dev *host_bridge;
2166 int pos; 2298 int pos;
2167 int found; 2299 int found;
2168 2300
2169 /* Enabling HT MSI mapping on this device breaks MCP51 */
2170 if (dev->device == 0x270)
2171 return;
2172
2173 /* check if there is HT MSI cap or enabled on this device */ 2301 /* check if there is HT MSI cap or enabled on this device */
2174 found = ht_check_msi_mapping(dev); 2302 found = ht_check_msi_mapping(dev);
2175 2303
@@ -2193,7 +2321,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2193 /* Host bridge is to HT */ 2321 /* Host bridge is to HT */
2194 if (found == 1) { 2322 if (found == 1) {
2195 /* it is not enabled, try to enable it */ 2323 /* it is not enabled, try to enable it */
2196 nv_ht_enable_msi_mapping(dev); 2324 if (all)
2325 ht_enable_msi_mapping(dev);
2326 else
2327 nv_ht_enable_msi_mapping(dev);
2197 } 2328 }
2198 return; 2329 return;
2199 } 2330 }
@@ -2205,8 +2336,20 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2205 /* Host bridge is not to HT, disable HT MSI mapping on this device */ 2336 /* Host bridge is not to HT, disable HT MSI mapping on this device */
2206 ht_disable_msi_mapping(dev); 2337 ht_disable_msi_mapping(dev);
2207} 2338}
2208DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2339
2209DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2340static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2341{
2342 return __nv_msi_ht_cap_quirk(dev, 1);
2343}
2344
2345static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2346{
2347 return __nv_msi_ht_cap_quirk(dev, 0);
2348}
2349
2350DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2351
2352DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2210 2353
2211static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 2354static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
2212{ 2355{
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 042e08924421..86503c14ce7e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -71,6 +71,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
71 down_write(&pci_bus_sem); 71 down_write(&pci_bus_sem);
72 list_del(&pci_bus->node); 72 list_del(&pci_bus->node);
73 up_write(&pci_bus_sem); 73 up_write(&pci_bus_sem);
74 if (!pci_bus->is_added)
75 return;
76
74 pci_remove_legacy_files(pci_bus); 77 pci_remove_legacy_files(pci_bus);
75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); 78 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
76 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); 79 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
@@ -92,6 +95,7 @@ EXPORT_SYMBOL(pci_remove_bus);
92 */ 95 */
93void pci_remove_bus_device(struct pci_dev *dev) 96void pci_remove_bus_device(struct pci_dev *dev)
94{ 97{
98 pci_stop_bus_device(dev);
95 if (dev->subordinate) { 99 if (dev->subordinate) {
96 struct pci_bus *b = dev->subordinate; 100 struct pci_bus *b = dev->subordinate;
97 101
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 5af8bd538149..710d4ea69568 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
29 if (pdev->is_pcie) 29 if (pdev->is_pcie)
30 return NULL; 30 return NULL;
31 while (1) { 31 while (1) {
32 if (!pdev->bus->self) 32 if (!pdev->bus->parent)
33 break; 33 break;
34 pdev = pdev->bus->self; 34 pdev = pdev->bus->self;
35 /* a p2p bridge */ 35 /* a p2p bridge */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 704608945780..334285a8e237 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,7 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29 29
30static void pbus_assign_resources_sorted(struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
32 struct pci_dev *dev; 32 struct pci_dev *dev;
33 struct resource *res; 33 struct resource *res;
@@ -144,6 +144,9 @@ static void pci_setup_bridge(struct pci_bus *bus)
144 struct pci_bus_region region; 144 struct pci_bus_region region;
145 u32 l, bu, lu, io_upper16; 145 u32 l, bu, lu, io_upper16;
146 146
147 if (!pci_is_root_bus(bus) && bus->is_added)
148 return;
149
147 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", 150 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
148 pci_domain_nr(bus), bus->number); 151 pci_domain_nr(bus), bus->number);
149 152
@@ -495,7 +498,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
495} 498}
496EXPORT_SYMBOL(pci_bus_size_bridges); 499EXPORT_SYMBOL(pci_bus_size_bridges);
497 500
498void __ref pci_bus_assign_resources(struct pci_bus *bus) 501void __ref pci_bus_assign_resources(const struct pci_bus *bus)
499{ 502{
500 struct pci_bus *b; 503 struct pci_bus *b;
501 struct pci_dev *dev; 504 struct pci_dev *dev;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 32e8d88a4619..3039fcb86afc 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -120,6 +120,21 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
120 return err; 120 return err;
121} 121}
122 122
123#ifdef CONFIG_PCI_QUIRKS
124void pci_disable_bridge_window(struct pci_dev *dev)
125{
126 dev_dbg(&dev->dev, "Disabling bridge window.\n");
127
128 /* MMIO Base/Limit */
129 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
130
131 /* Prefetchable MMIO Base/Limit */
132 pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
133 pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
134 pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
135}
136#endif /* CONFIG_PCI_QUIRKS */
137
123int pci_assign_resource(struct pci_dev *dev, int resno) 138int pci_assign_resource(struct pci_dev *dev, int resno)
124{ 139{
125 struct pci_bus *bus = dev->bus; 140 struct pci_bus *bus = dev->bus;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 5a8ccb4f604d..21189447e545 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * drivers/pci/slot.c 2 * drivers/pci/slot.c
3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
5 * Alex Chiang <achiang@hp.com> 5 * Alex Chiang <achiang@hp.com>
6 */ 6 */
7 7
8#include <linux/kobject.h> 8#include <linux/kobject.h>
@@ -52,8 +52,8 @@ static void pci_slot_release(struct kobject *kobj)
52 struct pci_dev *dev; 52 struct pci_dev *dev;
53 struct pci_slot *slot = to_pci_slot(kobj); 53 struct pci_slot *slot = to_pci_slot(kobj);
54 54
55 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, 55 dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
56 slot->bus->number, slot->number); 56 slot->number, pci_slot_name(slot));
57 57
58 list_for_each_entry(dev, &slot->bus->devices, bus_list) 58 list_for_each_entry(dev, &slot->bus->devices, bus_list)
59 if (PCI_SLOT(dev->devfn) == slot->number) 59 if (PCI_SLOT(dev->devfn) == slot->number)
@@ -248,9 +248,8 @@ placeholder:
248 if (PCI_SLOT(dev->devfn) == slot_nr) 248 if (PCI_SLOT(dev->devfn) == slot_nr)
249 dev->slot = slot; 249 dev->slot = slot;
250 250
251 /* Don't care if debug printk has a -1 for slot_nr */ 251 dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 slot_nr, pci_slot_name(slot));
253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
254 253
255out: 254out:
256 kfree(slot_name); 255 kfree(slot_name);
@@ -299,9 +298,8 @@ EXPORT_SYMBOL_GPL(pci_renumber_slot);
299 */ 298 */
300void pci_destroy_slot(struct pci_slot *slot) 299void pci_destroy_slot(struct pci_slot *slot)
301{ 300{
302 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 301 dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
303 atomic_read(&slot->kobj.kref.refcount) - 1, 302 slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
304 pci_domain_nr(slot->bus), slot->bus->number, slot->number);
305 303
306 down_write(&pci_bus_sem); 304 down_write(&pci_bus_sem);
307 kobject_put(&slot->kobj); 305 kobject_put(&slot->kobj);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 78199151c00b..d047f846c3ed 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -257,6 +257,40 @@ void __init acpi_no_s4_hw_signature(void);
257void __init acpi_old_suspend_ordering(void); 257void __init acpi_old_suspend_ordering(void);
258void __init acpi_s4_no_nvs(void); 258void __init acpi_s4_no_nvs(void);
259#endif /* CONFIG_PM_SLEEP */ 259#endif /* CONFIG_PM_SLEEP */
260
261#define OSC_QUERY_TYPE 0
262#define OSC_SUPPORT_TYPE 1
263#define OSC_CONTROL_TYPE 2
264#define OSC_SUPPORT_MASKS 0x1f
265
266/* _OSC DW0 Definition */
267#define OSC_QUERY_ENABLE 1
268#define OSC_REQUEST_ERROR 2
269#define OSC_INVALID_UUID_ERROR 4
270#define OSC_INVALID_REVISION_ERROR 8
271#define OSC_CAPABILITIES_MASK_ERROR 16
272
273/* _OSC DW1 Definition (OS Support Fields) */
274#define OSC_EXT_PCI_CONFIG_SUPPORT 1
275#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
276#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
277#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
278#define OSC_MSI_SUPPORT 16
279
280/* _OSC DW1 Definition (OS Control Fields) */
281#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
282#define OSC_SHPC_NATIVE_HP_CONTROL 2
283#define OSC_PCI_EXPRESS_PME_CONTROL 4
284#define OSC_PCI_EXPRESS_AER_CONTROL 8
285#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
286
287#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
288 OSC_SHPC_NATIVE_HP_CONTROL | \
289 OSC_PCI_EXPRESS_PME_CONTROL | \
290 OSC_PCI_EXPRESS_AER_CONTROL | \
291 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
292
293extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
260#else /* CONFIG_ACPI */ 294#else /* CONFIG_ACPI */
261 295
262static inline int early_acpi_boot_init(void) 296static inline int early_acpi_boot_init(void)
diff --git a/include/linux/msi.h b/include/linux/msi.h
index d2b8a1e8ca11..6991ab5b24d1 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -20,20 +20,23 @@ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
20 20
21struct msi_desc { 21struct msi_desc {
22 struct { 22 struct {
23 __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ 23 __u8 is_msix : 1;
24 __u8 multiple: 3; /* log2 number of messages */
24 __u8 maskbit : 1; /* mask-pending bit supported ? */ 25 __u8 maskbit : 1; /* mask-pending bit supported ? */
25 __u8 masked : 1;
26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
27 __u8 pos; /* Location of the msi capability */ 27 __u8 pos; /* Location of the msi capability */
28 __u32 maskbits_mask; /* mask bits mask */
29 __u16 entry_nr; /* specific enabled entry */ 28 __u16 entry_nr; /* specific enabled entry */
30 unsigned default_irq; /* default pre-assigned irq */ 29 unsigned default_irq; /* default pre-assigned irq */
31 }msi_attrib; 30 } msi_attrib;
32 31
32 u32 masked; /* mask bits */
33 unsigned int irq; 33 unsigned int irq;
34 struct list_head list; 34 struct list_head list;
35 35
36 void __iomem *mask_base; 36 union {
37 void __iomem *mask_base;
38 u8 mask_pos;
39 };
37 struct pci_dev *dev; 40 struct pci_dev *dev;
38 41
39 /* Last set MSI message */ 42 /* Last set MSI message */
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 042c166f65d5..092e82e0048c 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -10,72 +10,25 @@
10 10
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12 12
13#define OSC_QUERY_TYPE 0
14#define OSC_SUPPORT_TYPE 1
15#define OSC_CONTROL_TYPE 2
16#define OSC_SUPPORT_MASKS 0x1f
17
18/*
19 * _OSC DW0 Definition
20 */
21#define OSC_QUERY_ENABLE 1
22#define OSC_REQUEST_ERROR 2
23#define OSC_INVALID_UUID_ERROR 4
24#define OSC_INVALID_REVISION_ERROR 8
25#define OSC_CAPABILITIES_MASK_ERROR 16
26
27/*
28 * _OSC DW1 Definition (OS Support Fields)
29 */
30#define OSC_EXT_PCI_CONFIG_SUPPORT 1
31#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
32#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
33#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
34#define OSC_MSI_SUPPORT 16
35
36/*
37 * _OSC DW1 Definition (OS Control Fields)
38 */
39#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
40#define OSC_SHPC_NATIVE_HP_CONTROL 2
41#define OSC_PCI_EXPRESS_PME_CONTROL 4
42#define OSC_PCI_EXPRESS_AER_CONTROL 8
43#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
44
45#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
46 OSC_SHPC_NATIVE_HP_CONTROL | \
47 OSC_PCI_EXPRESS_PME_CONTROL | \
48 OSC_PCI_EXPRESS_AER_CONTROL | \
49 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
50
51#ifdef CONFIG_ACPI 13#ifdef CONFIG_ACPI
52extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
53int pci_acpi_osc_support(acpi_handle handle, u32 flags);
54static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 14static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
55{ 15{
56 /* Find root host bridge */ 16 struct pci_bus *pbus = pdev->bus;
57 while (pdev->bus->self) 17 /* Find a PCI root bus */
58 pdev = pdev->bus->self; 18 while (pbus->parent)
59 19 pbus = pbus->parent;
60 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), 20 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
61 pdev->bus->number); 21 pbus->number);
62} 22}
63 23
64static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) 24static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
65{ 25{
66 int seg = pci_domain_nr(pbus), busnr = pbus->number; 26 if (pbus->parent)
67 struct pci_dev *bridge = pbus->self; 27 return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
68 if (bridge) 28 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
69 return DEVICE_ACPI_HANDLE(&(bridge->dev)); 29 pbus->number);
70 return acpi_get_pci_rootbridge_handle(seg, busnr);
71} 30}
72#else 31#else
73#if !defined(AE_ERROR)
74typedef u32 acpi_status;
75#define AE_ERROR (acpi_status) (0x0001)
76#endif
77static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
78{return AE_ERROR;}
79static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 32static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
80{ return NULL; } 33{ return NULL; }
81#endif 34#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index df3644132617..a7fe4bbd7ff1 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -52,6 +52,7 @@
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/io.h> 54#include <linux/io.h>
55#include <linux/irqreturn.h>
55 56
56/* Include the ID list */ 57/* Include the ID list */
57#include <linux/pci_ids.h> 58#include <linux/pci_ids.h>
@@ -93,6 +94,12 @@ enum {
93 /* #6: expansion ROM resource */ 94 /* #6: expansion ROM resource */
94 PCI_ROM_RESOURCE, 95 PCI_ROM_RESOURCE,
95 96
97 /* device specific resources */
98#ifdef CONFIG_PCI_IOV
99 PCI_IOV_RESOURCES,
100 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
101#endif
102
96 /* resources assigned to buses behind the bridge */ 103 /* resources assigned to buses behind the bridge */
97#define PCI_BRIDGE_RESOURCE_NUM 4 104#define PCI_BRIDGE_RESOURCE_NUM 4
98 105
@@ -180,6 +187,7 @@ struct pci_cap_saved_state {
180 187
181struct pcie_link_state; 188struct pcie_link_state;
182struct pci_vpd; 189struct pci_vpd;
190struct pci_sriov;
183 191
184/* 192/*
185 * The pci_dev structure is used to describe PCI devices. 193 * The pci_dev structure is used to describe PCI devices.
@@ -257,6 +265,8 @@ struct pci_dev {
257 unsigned int is_managed:1; 265 unsigned int is_managed:1;
258 unsigned int is_pcie:1; 266 unsigned int is_pcie:1;
259 unsigned int state_saved:1; 267 unsigned int state_saved:1;
268 unsigned int is_physfn:1;
269 unsigned int is_virtfn:1;
260 pci_dev_flags_t dev_flags; 270 pci_dev_flags_t dev_flags;
261 atomic_t enable_cnt; /* pci_enable_device has been called */ 271 atomic_t enable_cnt; /* pci_enable_device has been called */
262 272
@@ -270,6 +280,12 @@ struct pci_dev {
270 struct list_head msi_list; 280 struct list_head msi_list;
271#endif 281#endif
272 struct pci_vpd *vpd; 282 struct pci_vpd *vpd;
283#ifdef CONFIG_PCI_IOV
284 union {
285 struct pci_sriov *sriov; /* SR-IOV capability related */
286 struct pci_dev *physfn; /* the PF this VF is associated with */
287 };
288#endif
273}; 289};
274 290
275extern struct pci_dev *alloc_pci_dev(void); 291extern struct pci_dev *alloc_pci_dev(void);
@@ -341,6 +357,15 @@ struct pci_bus {
341#define pci_bus_b(n) list_entry(n, struct pci_bus, node) 357#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
342#define to_pci_bus(n) container_of(n, struct pci_bus, dev) 358#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
343 359
360/*
361 * Returns true if the pci bus is root (behind host-pci bridge),
362 * false otherwise
363 */
364static inline bool pci_is_root_bus(struct pci_bus *pbus)
365{
366 return !(pbus->parent);
367}
368
344#ifdef CONFIG_PCI_MSI 369#ifdef CONFIG_PCI_MSI
345static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 370static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
346{ 371{
@@ -528,7 +553,7 @@ void pcibios_update_irq(struct pci_dev *, int irq);
528/* Generic PCI functions used internally */ 553/* Generic PCI functions used internally */
529 554
530extern struct pci_bus *pci_find_bus(int domain, int busnr); 555extern struct pci_bus *pci_find_bus(int domain, int busnr);
531void pci_bus_add_devices(struct pci_bus *bus); 556void pci_bus_add_devices(const struct pci_bus *bus);
532struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, 557struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
533 struct pci_ops *ops, void *sysdata); 558 struct pci_ops *ops, void *sysdata);
534static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, 559static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
@@ -702,6 +727,9 @@ int pci_back_from_sleep(struct pci_dev *dev);
702 727
703/* Functions for PCI Hotplug drivers to use */ 728/* Functions for PCI Hotplug drivers to use */
704int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); 729int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
730#ifdef CONFIG_HOTPLUG
731unsigned int pci_rescan_bus(struct pci_bus *bus);
732#endif
705 733
706/* Vital product data routines */ 734/* Vital product data routines */
707ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 735ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
@@ -709,7 +737,7 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
709int pci_vpd_truncate(struct pci_dev *dev, size_t size); 737int pci_vpd_truncate(struct pci_dev *dev, size_t size);
710 738
711/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 739/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
712void pci_bus_assign_resources(struct pci_bus *bus); 740void pci_bus_assign_resources(const struct pci_bus *bus);
713void pci_bus_size_bridges(struct pci_bus *bus); 741void pci_bus_size_bridges(struct pci_bus *bus);
714int pci_claim_resource(struct pci_dev *, int); 742int pci_claim_resource(struct pci_dev *, int);
715void pci_assign_unassigned_resources(void); 743void pci_assign_unassigned_resources(void);
@@ -790,7 +818,7 @@ struct msix_entry {
790 818
791 819
792#ifndef CONFIG_PCI_MSI 820#ifndef CONFIG_PCI_MSI
793static inline int pci_enable_msi(struct pci_dev *dev) 821static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
794{ 822{
795 return -1; 823 return -1;
796} 824}
@@ -800,6 +828,10 @@ static inline void pci_msi_shutdown(struct pci_dev *dev)
800static inline void pci_disable_msi(struct pci_dev *dev) 828static inline void pci_disable_msi(struct pci_dev *dev)
801{ } 829{ }
802 830
831static inline int pci_msix_table_size(struct pci_dev *dev)
832{
833 return 0;
834}
803static inline int pci_enable_msix(struct pci_dev *dev, 835static inline int pci_enable_msix(struct pci_dev *dev,
804 struct msix_entry *entries, int nvec) 836 struct msix_entry *entries, int nvec)
805{ 837{
@@ -821,9 +853,10 @@ static inline int pci_msi_enabled(void)
821 return 0; 853 return 0;
822} 854}
823#else 855#else
824extern int pci_enable_msi(struct pci_dev *dev); 856extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
825extern void pci_msi_shutdown(struct pci_dev *dev); 857extern void pci_msi_shutdown(struct pci_dev *dev);
826extern void pci_disable_msi(struct pci_dev *dev); 858extern void pci_disable_msi(struct pci_dev *dev);
859extern int pci_msix_table_size(struct pci_dev *dev);
827extern int pci_enable_msix(struct pci_dev *dev, 860extern int pci_enable_msix(struct pci_dev *dev,
828 struct msix_entry *entries, int nvec); 861 struct msix_entry *entries, int nvec);
829extern void pci_msix_shutdown(struct pci_dev *dev); 862extern void pci_msix_shutdown(struct pci_dev *dev);
@@ -842,6 +875,8 @@ static inline int pcie_aspm_enabled(void)
842extern int pcie_aspm_enabled(void); 875extern int pcie_aspm_enabled(void);
843#endif 876#endif
844 877
878#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
879
845#ifdef CONFIG_HT_IRQ 880#ifdef CONFIG_HT_IRQ
846/* The functions a driver should call */ 881/* The functions a driver should call */
847int ht_create_irq(struct pci_dev *dev, int idx); 882int ht_create_irq(struct pci_dev *dev, int idx);
@@ -1195,5 +1230,23 @@ int pci_ext_cfg_avail(struct pci_dev *dev);
1195 1230
1196void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1231void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1197 1232
1233#ifdef CONFIG_PCI_IOV
1234extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
1235extern void pci_disable_sriov(struct pci_dev *dev);
1236extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
1237#else
1238static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1239{
1240 return -ENODEV;
1241}
1242static inline void pci_disable_sriov(struct pci_dev *dev)
1243{
1244}
1245static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
1246{
1247 return IRQ_NONE;
1248}
1249#endif
1250
1198#endif /* __KERNEL__ */ 1251#endif /* __KERNEL__ */
1199#endif /* LINUX_PCI_H */ 1252#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e5816dd33371..cb14fd260837 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2396,6 +2396,7 @@
2396#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c 2396#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
2397#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 2397#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
2398#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 2398#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
2399#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
2399#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 2400#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
2400#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 2401#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
2401#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 2402#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 027815b4635e..e4d08c1b2e0b 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -235,7 +235,7 @@
235#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ 235#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
236#define PCI_PM_CTRL 4 /* PM control and status register */ 236#define PCI_PM_CTRL 4 /* PM control and status register */
237#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ 237#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
238#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ 238#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
239#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ 239#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
240#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ 240#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
241#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ 241#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
@@ -375,6 +375,7 @@
375#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ 375#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
376#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ 376#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
377#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ 377#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
378#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
378#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ 379#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
379#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ 380#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
380#define PCI_EXP_DEVCAP 4 /* Device capabilities */ 381#define PCI_EXP_DEVCAP 4 /* Device capabilities */
@@ -487,6 +488,8 @@
487#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ 488#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
488#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ 489#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
489#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ 490#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
491#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
492#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
490 493
491/* Extended Capabilities (PCI-X 2.0 and Express) */ 494/* Extended Capabilities (PCI-X 2.0 and Express) */
492#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) 495#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -498,6 +501,7 @@
498#define PCI_EXT_CAP_ID_DSN 3 501#define PCI_EXT_CAP_ID_DSN 3
499#define PCI_EXT_CAP_ID_PWR 4 502#define PCI_EXT_CAP_ID_PWR 4
500#define PCI_EXT_CAP_ID_ARI 14 503#define PCI_EXT_CAP_ID_ARI 14
504#define PCI_EXT_CAP_ID_SRIOV 16
501 505
502/* Advanced Error Reporting */ 506/* Advanced Error Reporting */
503#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ 507#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -615,4 +619,35 @@
615#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ 619#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
616#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ 620#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
617 621
622/* Single Root I/O Virtualization */
623#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
624#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
625#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
626#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
627#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
628#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
629#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
630#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
631#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
632#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
633#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
634#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
635#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
636#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
637#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */
638#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */
639#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */
640#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */
641#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */
642#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */
643#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */
644#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */
645#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/
646#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */
647#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */
648#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */
649#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
650#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
651#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
652
618#endif /* LINUX_PCI_REGS_H */ 653#endif /* LINUX_PCI_REGS_H */
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 6cd91e3f9820..b4c79545330b 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -16,29 +16,30 @@
16#define PCIE_ANY_PORT 7 16#define PCIE_ANY_PORT 7
17 17
18/* Service Type */ 18/* Service Type */
19#define PCIE_PORT_SERVICE_PME 1 /* Power Management Event */ 19#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
20#define PCIE_PORT_SERVICE_AER 2 /* Advanced Error Reporting */ 20#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
21#define PCIE_PORT_SERVICE_HP 4 /* Native Hotplug */ 21#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
22#define PCIE_PORT_SERVICE_VC 8 /* Virtual Channel */ 22#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
23#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
24#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
25#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
26#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
23 27
24/* Root/Upstream/Downstream Port's Interrupt Mode */ 28/* Root/Upstream/Downstream Port's Interrupt Mode */
29#define PCIE_PORT_NO_IRQ (-1)
25#define PCIE_PORT_INTx_MODE 0 30#define PCIE_PORT_INTx_MODE 0
26#define PCIE_PORT_MSI_MODE 1 31#define PCIE_PORT_MSI_MODE 1
27#define PCIE_PORT_MSIX_MODE 2 32#define PCIE_PORT_MSIX_MODE 2
28 33
29struct pcie_port_service_id { 34struct pcie_port_data {
30 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ 35 int port_type; /* Type of the port */
31 __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ 36 int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
32 __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
33 __u32 port_type, service_type; /* Port Entity */
34 kernel_ulong_t driver_data;
35}; 37};
36 38
37struct pcie_device { 39struct pcie_device {
38 int irq; /* Service IRQ/MSI/MSI-X Vector */ 40 int irq; /* Service IRQ/MSI/MSI-X Vector */
39 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ 41 struct pci_dev *port; /* Root/Upstream/Downstream Port */
40 struct pcie_port_service_id id; /* Service ID */ 42 u32 service; /* Port service this device represents */
41 struct pci_dev *port; /* Root/Upstream/Downstream Port */
42 void *priv_data; /* Service Private Data */ 43 void *priv_data; /* Service Private Data */
43 struct device device; /* Generic Device Interface */ 44 struct device device; /* Generic Device Interface */
44}; 45};
@@ -56,10 +57,9 @@ static inline void* get_service_data(struct pcie_device *dev)
56 57
57struct pcie_port_service_driver { 58struct pcie_port_service_driver {
58 const char *name; 59 const char *name;
59 int (*probe) (struct pcie_device *dev, 60 int (*probe) (struct pcie_device *dev);
60 const struct pcie_port_service_id *id);
61 void (*remove) (struct pcie_device *dev); 61 void (*remove) (struct pcie_device *dev);
62 int (*suspend) (struct pcie_device *dev, pm_message_t state); 62 int (*suspend) (struct pcie_device *dev);
63 int (*resume) (struct pcie_device *dev); 63 int (*resume) (struct pcie_device *dev);
64 64
65 /* Service Error Recovery Handler */ 65 /* Service Error Recovery Handler */
@@ -68,7 +68,9 @@ struct pcie_port_service_driver {
68 /* Link Reset Capability - AER service driver specific */ 68 /* Link Reset Capability - AER service driver specific */
69 pci_ers_result_t (*reset_link) (struct pci_dev *dev); 69 pci_ers_result_t (*reset_link) (struct pci_dev *dev);
70 70
71 const struct pcie_port_service_id *id_table; 71 int port_type; /* Type of the port this driver can handle */
72 u32 service; /* Port service this device represents */
73
72 struct device_driver driver; 74 struct device_driver driver;
73}; 75};
74#define to_service_driver(d) \ 76#define to_service_driver(d) \