Diffstat (limited to 'drivers')
-rw-r--r--  drivers/pci/Makefile         6
-rw-r--r--  drivers/pci/bus.c           21
-rw-r--r--  drivers/pci/msi-altix.c    210
-rw-r--r--  drivers/pci/msi-apic.c     100
-rw-r--r--  drivers/pci/msi.c          285
-rw-r--r--  drivers/pci/msi.h          133
-rw-r--r--  drivers/pci/pci-acpi.c      16
-rw-r--r--  drivers/pci/pci-sysfs.c     45
-rw-r--r--  drivers/pci/pci.c           16
-rw-r--r--  drivers/pci/pci.h            2
-rw-r--r--  drivers/pci/probe.c         45
-rw-r--r--  drivers/pci/quirks.c        47
-rw-r--r--  drivers/pci/remove.c        12
-rw-r--r--  drivers/pci/search.c        32
-rw-r--r--  drivers/pci/setup-bus.c      5
-rw-r--r--  drivers/pci/setup-res.c     40
-rw-r--r--  drivers/scsi/qla1280.c      24
-rw-r--r--  drivers/scsi/sata_vsc.c     11
18 files changed, 751 insertions, 299 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 6707df968934..f2d152b818f0 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -26,7 +26,11 @@ obj-$(CONFIG_PPC32) += setup-irq.o
 obj-$(CONFIG_PPC64) += setup-bus.o
 obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
 obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_PCI_MSI) += msi.o
+
+msiobj-y := msi.o msi-apic.o
+msiobj-$(CONFIG_IA64_GENERIC) += msi-altix.o
+msiobj-$(CONFIG_IA64_SGI_SN2) += msi-altix.o
+obj-$(CONFIG_PCI_MSI) += $(msiobj-y)
 
 #
 # ACPI Related PCI FW Functions
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index eed67d9e73bc..723092682023 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -81,9 +81,9 @@ void __devinit pci_bus_add_device(struct pci_dev *dev)
 {
 	device_add(&dev->dev);
 
-	spin_lock(&pci_bus_lock);
+	down_write(&pci_bus_sem);
 	list_add_tail(&dev->global_list, &pci_devices);
-	spin_unlock(&pci_bus_lock);
+	up_write(&pci_bus_sem);
 
 	pci_proc_attach_device(dev);
 	pci_create_sysfs_dev_files(dev);
@@ -125,10 +125,10 @@ void __devinit pci_bus_add_devices(struct pci_bus *bus)
 		 */
 		if (dev->subordinate) {
 			if (list_empty(&dev->subordinate->node)) {
-				spin_lock(&pci_bus_lock);
+				down_write(&pci_bus_sem);
 				list_add_tail(&dev->subordinate->node,
 						&dev->bus->children);
-				spin_unlock(&pci_bus_lock);
+				up_write(&pci_bus_sem);
 			}
 			pci_bus_add_devices(dev->subordinate);
 
@@ -168,7 +168,7 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
 	struct list_head *next;
 
 	bus = top;
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	next = top->devices.next;
 	for (;;) {
 		if (next == &bus->devices) {
@@ -180,22 +180,19 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
 			continue;
 		}
 		dev = list_entry(next, struct pci_dev, bus_list);
-		pci_dev_get(dev);
 		if (dev->subordinate) {
 			/* this is a pci-pci bridge, do its devices next */
 			next = dev->subordinate->devices.next;
 			bus = dev->subordinate;
 		} else
 			next = dev->bus_list.next;
-		spin_unlock(&pci_bus_lock);
 
-		/* Run device routines with the bus unlocked */
+		/* Run device routines with the device locked */
+		down(&dev->dev.sem);
 		cb(dev, userdata);
-
-		spin_lock(&pci_bus_lock);
-		pci_dev_put(dev);
+		up(&dev->dev.sem);
 	}
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 }
 EXPORT_SYMBOL_GPL(pci_walk_bus);
 
diff --git a/drivers/pci/msi-altix.c b/drivers/pci/msi-altix.c
new file mode 100644
index 000000000000..bed4183a5e39
--- /dev/null
+++ b/drivers/pci/msi-altix.c
@@ -0,0 +1,210 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/pci.h>
11#include <linux/cpumask.h>
12
13#include <asm/sn/addrs.h>
14#include <asm/sn/intr.h>
15#include <asm/sn/pcibus_provider_defs.h>
16#include <asm/sn/pcidev.h>
17#include <asm/sn/nodepda.h>
18
19#include "msi.h"
20
21struct sn_msi_info {
22 u64 pci_addr;
23 struct sn_irq_info *sn_irq_info;
24};
25
26static struct sn_msi_info *sn_msi_info;
27
28static void
29sn_msi_teardown(unsigned int vector)
30{
31 nasid_t nasid;
32 int widget;
33 struct pci_dev *pdev;
34 struct pcidev_info *sn_pdev;
35 struct sn_irq_info *sn_irq_info;
36 struct pcibus_bussoft *bussoft;
37 struct sn_pcibus_provider *provider;
38
39 sn_irq_info = sn_msi_info[vector].sn_irq_info;
40 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
41 return;
42
43 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
44 pdev = sn_pdev->pdi_linux_pcidev;
45 provider = SN_PCIDEV_BUSPROVIDER(pdev);
46
47 (*provider->dma_unmap)(pdev,
48 sn_msi_info[vector].pci_addr,
49 PCI_DMA_FROMDEVICE);
50 sn_msi_info[vector].pci_addr = 0;
51
52 bussoft = SN_PCIDEV_BUSSOFT(pdev);
53 nasid = NASID_GET(bussoft->bs_base);
54 widget = (nasid & 1) ?
55 TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
56 SWIN_WIDGETNUM(bussoft->bs_base);
57
58 sn_intr_free(nasid, widget, sn_irq_info);
59 sn_msi_info[vector].sn_irq_info = NULL;
60
61 return;
62}
63
64int
65sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
66 u32 *addr_hi, u32 *addr_lo, u32 *data)
67{
68 int widget;
69 int status;
70 nasid_t nasid;
71 u64 bus_addr;
72 struct sn_irq_info *sn_irq_info;
73 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
74 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
75
76 if (bussoft == NULL)
77 return -EINVAL;
78
79 if (provider == NULL || provider->dma_map_consistent == NULL)
80 return -EINVAL;
81
82 /*
83 * Set up the vector plumbing. Let the prom (via sn_intr_alloc)
84 * decide which cpu to direct this msi at by default.
85 */
86
87 nasid = NASID_GET(bussoft->bs_base);
88 widget = (nasid & 1) ?
89 TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
90 SWIN_WIDGETNUM(bussoft->bs_base);
91
92 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
93 if (! sn_irq_info)
94 return -ENOMEM;
95
96 status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1);
97 if (status) {
98 kfree(sn_irq_info);
99 return -ENOMEM;
100 }
101
102 sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */
103 sn_irq_fixup(pdev, sn_irq_info);
104
105 /* Prom probably should fill these in, but doesn't ... */
106 sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
107 sn_irq_info->irq_bridge = (void *)bussoft->bs_base;
108
109 /*
110 * Map the xio address into bus space
111 */
112 bus_addr = (*provider->dma_map_consistent)(pdev,
113 sn_irq_info->irq_xtalkaddr,
114 sizeof(sn_irq_info->irq_xtalkaddr),
115 SN_DMA_MSI|SN_DMA_ADDR_XIO);
116 if (! bus_addr) {
117 sn_intr_free(nasid, widget, sn_irq_info);
118 kfree(sn_irq_info);
119 return -ENOMEM;
120 }
121
122 sn_msi_info[vector].sn_irq_info = sn_irq_info;
123 sn_msi_info[vector].pci_addr = bus_addr;
124
125 *addr_hi = (u32)(bus_addr >> 32);
126 *addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
127
128 /*
129 * In the SN platform, bit 16 is a "send vector" bit which
130 * must be present in order to move the vector through the system.
131 */
132 *data = 0x100 + (unsigned int)vector;
133
134#ifdef CONFIG_SMP
135 set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0);
136#endif
137
138 return 0;
139}
140
141static void
142sn_msi_target(unsigned int vector, unsigned int cpu,
143 u32 *addr_hi, u32 *addr_lo)
144{
145 int slice;
146 nasid_t nasid;
147 u64 bus_addr;
148 struct pci_dev *pdev;
149 struct pcidev_info *sn_pdev;
150 struct sn_irq_info *sn_irq_info;
151 struct sn_irq_info *new_irq_info;
152 struct sn_pcibus_provider *provider;
153
154 sn_irq_info = sn_msi_info[vector].sn_irq_info;
155 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
156 return;
157
158 /*
159 * Release XIO resources for the old MSI PCI address
160 */
161
162 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
163 pdev = sn_pdev->pdi_linux_pcidev;
164 provider = SN_PCIDEV_BUSPROVIDER(pdev);
165
166 bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo);
167 (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
168 sn_msi_info[vector].pci_addr = 0;
169
170 nasid = cpuid_to_nasid(cpu);
171 slice = cpuid_to_slice(cpu);
172
173 new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
174 sn_msi_info[vector].sn_irq_info = new_irq_info;
175 if (new_irq_info == NULL)
176 return;
177
178 /*
179 * Map the xio address into bus space
180 */
181
182 bus_addr = (*provider->dma_map_consistent)(pdev,
183 new_irq_info->irq_xtalkaddr,
184 sizeof(new_irq_info->irq_xtalkaddr),
185 SN_DMA_MSI|SN_DMA_ADDR_XIO);
186
187 sn_msi_info[vector].pci_addr = bus_addr;
188 *addr_hi = (u32)(bus_addr >> 32);
189 *addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
190}
191
192struct msi_ops sn_msi_ops = {
193 .setup = sn_msi_setup,
194 .teardown = sn_msi_teardown,
195#ifdef CONFIG_SMP
196 .target = sn_msi_target,
197#endif
198};
199
200int
201sn_msi_init(void)
202{
203 sn_msi_info =
204 kzalloc(sizeof(struct sn_msi_info) * NR_VECTORS, GFP_KERNEL);
205 if (! sn_msi_info)
206 return -ENOMEM;
207
208 msi_register(&sn_msi_ops);
209 return 0;
210}
diff --git a/drivers/pci/msi-apic.c b/drivers/pci/msi-apic.c
new file mode 100644
index 000000000000..0eb5fe9003a2
--- /dev/null
+++ b/drivers/pci/msi-apic.c
@@ -0,0 +1,100 @@
1/*
2 * MSI hooks for standard x86 apic
3 */
4
5#include <linux/pci.h>
6#include <linux/irq.h>
7
8#include "msi.h"
9
10/*
11 * Shifts for APIC-based data
12 */
13
14#define MSI_DATA_VECTOR_SHIFT 0
15#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
16
17#define MSI_DATA_DELIVERY_SHIFT 8
18#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
19#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
20
21#define MSI_DATA_LEVEL_SHIFT 14
22#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
23#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
24
25#define MSI_DATA_TRIGGER_SHIFT 15
26#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
27#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
28
29/*
30 * Shift/mask fields for APIC-based bus address
31 */
32
33#define MSI_ADDR_HEADER 0xfee00000
34
35#define MSI_ADDR_DESTID_MASK 0xfff0000f
36#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
37
38#define MSI_ADDR_DESTMODE_SHIFT 2
39#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
40#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
41
42#define MSI_ADDR_REDIRECTION_SHIFT 3
43#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
44#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
45
46
47static void
48msi_target_apic(unsigned int vector,
49 unsigned int dest_cpu,
50 u32 *address_hi, /* in/out */
51 u32 *address_lo) /* in/out */
52{
53 u32 addr = *address_lo;
54
55 addr &= MSI_ADDR_DESTID_MASK;
56 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(dest_cpu));
57
58 *address_lo = addr;
59}
60
61static int
62msi_setup_apic(struct pci_dev *pdev, /* unused in generic */
63 unsigned int vector,
64 u32 *address_hi,
65 u32 *address_lo,
66 u32 *data)
67{
68 unsigned long dest_phys_id;
69
70 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
71
72 *address_hi = 0;
73 *address_lo = MSI_ADDR_HEADER |
74 MSI_ADDR_DESTMODE_PHYS |
75 MSI_ADDR_REDIRECTION_CPU |
76 MSI_ADDR_DESTID_CPU(dest_phys_id);
77
78 *data = MSI_DATA_TRIGGER_EDGE |
79 MSI_DATA_LEVEL_ASSERT |
80 MSI_DATA_DELIVERY_FIXED |
81 MSI_DATA_VECTOR(vector);
82
83 return 0;
84}
85
86static void
87msi_teardown_apic(unsigned int vector)
88{
89 return; /* no-op */
90}
91
92/*
93 * Generic ops used on most IA archs/platforms. Set with msi_register()
94 */
95
96struct msi_ops msi_apic_ops = {
97 .setup = msi_setup_apic,
98 .teardown = msi_teardown_apic,
99 .target = msi_target_apic,
100};
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 9855c4c920b8..7f8429284fab 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -23,8 +23,6 @@
 #include "pci.h"
 #include "msi.h"
 
-#define MSI_TARGET_CPU		first_cpu(cpu_online_map)
-
 static DEFINE_SPINLOCK(msi_lock);
 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
 static kmem_cache_t* msi_cachep;
@@ -37,9 +35,17 @@ static int nr_msix_devices;
 
 #ifndef CONFIG_X86_IO_APIC
 int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
 #endif
 
+static struct msi_ops *msi_ops;
+
+int
+msi_register(struct msi_ops *ops)
+{
+	msi_ops = ops;
+	return 0;
+}
+
 static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
 {
 	memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
@@ -92,7 +98,7 @@ static void msi_set_mask_bit(unsigned int vector, int flag)
 static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
 {
 	struct msi_desc *entry;
-	struct msg_address address;
+	u32 address_hi, address_lo;
 	unsigned int irq = vector;
 	unsigned int dest_cpu = first_cpu(cpu_mask);
 
@@ -108,28 +114,36 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
 		if (!pos)
 			return;
 
+		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
+			&address_hi);
 		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
-			&address.lo_address.value);
-		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
-		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
-			MSI_TARGET_CPU_SHIFT);
-		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
+			&address_lo);
+
+		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
+
+		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
+			address_hi);
 		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
-			address.lo_address.value);
+			address_lo);
 		set_native_irq_info(irq, cpu_mask);
 		break;
 	}
 	case PCI_CAP_ID_MSIX:
 	{
-		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
-
-		address.lo_address.value = readl(entry->mask_base + offset);
-		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
-		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
-			MSI_TARGET_CPU_SHIFT);
-		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
-		writel(address.lo_address.value, entry->mask_base + offset);
+		int offset_hi =
+			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
+		int offset_lo =
+			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
+
+		address_hi = readl(entry->mask_base + offset_hi);
+		address_lo = readl(entry->mask_base + offset_lo);
+
+		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
+
+		writel(address_hi, entry->mask_base + offset_hi);
+		writel(address_lo, entry->mask_base + offset_lo);
 		set_native_irq_info(irq, cpu_mask);
 		break;
 	}
@@ -251,30 +265,6 @@ static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
 	.set_affinity	= set_msi_affinity
 };
 
-static void msi_data_init(struct msg_data *msi_data,
-			  unsigned int vector)
-{
-	memset(msi_data, 0, sizeof(struct msg_data));
-	msi_data->vector = (u8)vector;
-	msi_data->delivery_mode = MSI_DELIVERY_MODE;
-	msi_data->level = MSI_LEVEL_MODE;
-	msi_data->trigger = MSI_TRIGGER_MODE;
-}
-
-static void msi_address_init(struct msg_address *msi_address)
-{
-	unsigned int	dest_id;
-	unsigned long	dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);
-
-	memset(msi_address, 0, sizeof(struct msg_address));
-	msi_address->hi_address = (u32)0;
-	dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
-	msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
-	msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
-	msi_address->lo_address.u.dest_id = dest_id;
-	msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
-}
-
 static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
 static int assign_msi_vector(void)
 {
@@ -369,13 +359,29 @@ static int msi_init(void)
 		return status;
 	}
 
+	status = msi_arch_init();
+	if (status < 0) {
+		pci_msi_enable = 0;
+		printk(KERN_WARNING
+		       "PCI: MSI arch init failed. MSI disabled.\n");
+		return status;
+	}
+
+	if (! msi_ops) {
+		printk(KERN_WARNING
+		       "PCI: MSI ops not registered. MSI disabled.\n");
+		status = -EINVAL;
+		return status;
+	}
+
+	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
 	status = msi_cache_init();
 	if (status < 0) {
 		pci_msi_enable = 0;
 		printk(KERN_WARNING "PCI: MSI cache init failed\n");
 		return status;
 	}
-	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
+
 	if (last_alloc_vector < 0) {
 		pci_msi_enable = 0;
 		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
@@ -442,9 +448,11 @@ static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
 		/* Set enabled bits to single MSI & enable MSI_enable bit */
 		msi_enable(control, 1);
 		pci_write_config_word(dev, msi_control_reg(pos), control);
+		dev->msi_enabled = 1;
 	} else {
 		msix_enable(control);
 		pci_write_config_word(dev, msi_control_reg(pos), control);
+		dev->msix_enabled = 1;
 	}
 	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
 		/* PCI Express Endpoint device detected */
@@ -461,9 +469,11 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type)
 		/* Set enabled bits to single MSI & enable MSI_enable bit */
 		msi_disable(control);
 		pci_write_config_word(dev, msi_control_reg(pos), control);
+		dev->msi_enabled = 0;
 	} else {
 		msix_disable(control);
 		pci_write_config_word(dev, msi_control_reg(pos), control);
+		dev->msix_enabled = 0;
 	}
 	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
 		/* PCI Express Endpoint device detected */
@@ -538,7 +548,6 @@ int pci_save_msi_state(struct pci_dev *dev)
 	pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
 	if (control & PCI_MSI_FLAGS_MASKBIT)
 		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
-	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
 	save_state->cap_nr = PCI_CAP_ID_MSI;
 	pci_add_saved_cap(dev, save_state);
 	return 0;
@@ -575,6 +584,8 @@ void pci_restore_msi_state(struct pci_dev *dev)
 int pci_save_msix_state(struct pci_dev *dev)
 {
 	int pos;
+	int temp;
+	int vector, head, tail = 0;
 	u16 control;
 	struct pci_cap_saved_state *save_state;
 
@@ -582,6 +593,7 @@ int pci_save_msix_state(struct pci_dev *dev)
 	if (pos <= 0 || dev->no_msi)
 		return 0;
 
+	/* save the capability */
 	pci_read_config_word(dev, msi_control_reg(pos), &control);
 	if (!(control & PCI_MSIX_FLAGS_ENABLE))
 		return 0;
@@ -593,7 +605,38 @@ int pci_save_msix_state(struct pci_dev *dev)
 	}
 	*((u16 *)&save_state->data[0]) = control;
 
-	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+	/* save the table */
+	temp = dev->irq;
+	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
+		kfree(save_state);
+		return -EINVAL;
+	}
+
+	vector = head = dev->irq;
+	while (head != tail) {
+		int j;
+		void __iomem *base;
+		struct msi_desc *entry;
+
+		entry = msi_desc[vector];
+		base = entry->mask_base;
+		j = entry->msi_attrib.entry_nr;
+
+		entry->address_lo_save =
+			readl(base + j * PCI_MSIX_ENTRY_SIZE +
+			      PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+		entry->address_hi_save =
+			readl(base + j * PCI_MSIX_ENTRY_SIZE +
+			      PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+		entry->data_save =
+			readl(base + j * PCI_MSIX_ENTRY_SIZE +
+			      PCI_MSIX_ENTRY_DATA_OFFSET);
+
+		tail = msi_desc[vector]->link.tail;
+		vector = tail;
+	}
+	dev->irq = temp;
+
 	save_state->cap_nr = PCI_CAP_ID_MSIX;
 	pci_add_saved_cap(dev, save_state);
 	return 0;
@@ -606,8 +649,6 @@ void pci_restore_msix_state(struct pci_dev *dev)
 	int vector, head, tail = 0;
 	void __iomem *base;
 	int j;
-	struct msg_address address;
-	struct msg_data data;
 	struct msi_desc *entry;
 	int temp;
 	struct pci_cap_saved_state *save_state;
@@ -633,20 +674,13 @@ void pci_restore_msix_state(struct pci_dev *dev)
 		base = entry->mask_base;
 		j = entry->msi_attrib.entry_nr;
 
-		msi_address_init(&address);
-		msi_data_init(&data, vector);
-
-		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
-		address.lo_address.value |= entry->msi_attrib.current_cpu <<
-			MSI_TARGET_CPU_SHIFT;
-
-		writel(address.lo_address.value,
+		writel(entry->address_lo_save,
 			base + j * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		writel(address.hi_address,
+		writel(entry->address_hi_save,
 			base + j * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		writel(*(u32*)&data,
+		writel(entry->data_save,
 			base + j * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_DATA_OFFSET);
 
@@ -660,30 +694,32 @@ void pci_restore_msix_state(struct pci_dev *dev)
 }
 #endif
 
-static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
+static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
 {
-	struct msg_address address;
-	struct msg_data data;
+	int status;
+	u32 address_hi;
+	u32 address_lo;
+	u32 data;
 	int pos, vector = dev->irq;
 	u16 control;
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
 	pci_read_config_word(dev, msi_control_reg(pos), &control);
+
 	/* Configure MSI capability structure */
-	msi_address_init(&address);
-	msi_data_init(&data, vector);
-	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
-				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
-	pci_write_config_dword(dev, msi_lower_address_reg(pos),
-			address.lo_address.value);
+	status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
+	if (status < 0)
+		return status;
+
+	pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
 	if (is_64bit_address(control)) {
 		pci_write_config_dword(dev,
-			msi_upper_address_reg(pos), address.hi_address);
+			msi_upper_address_reg(pos), address_hi);
 		pci_write_config_word(dev,
-			msi_data_reg(pos, 1), *((u32*)&data));
+			msi_data_reg(pos, 1), data);
 	} else
 		pci_write_config_word(dev,
-			msi_data_reg(pos, 0), *((u32*)&data));
+			msi_data_reg(pos, 0), data);
 	if (entry->msi_attrib.maskbit) {
 		unsigned int maskbits, temp;
 		/* All MSIs are unmasked by default, Mask them all */
@@ -697,6 +733,8 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
 			msi_mask_bits_reg(pos, is_64bit_address(control)),
 			maskbits);
 	}
+
+	return 0;
 }
 
 /**
@@ -710,6 +748,7 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
  **/
 static int msi_capability_init(struct pci_dev *dev)
 {
+	int status;
 	struct msi_desc *entry;
 	int pos, vector;
 	u16 control;
@@ -742,7 +781,12 @@ static int msi_capability_init(struct pci_dev *dev)
 	/* Replace with MSI handler */
 	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
 	/* Configure MSI capability structure */
-	msi_register_init(dev, entry);
+	status = msi_register_init(dev, entry);
+	if (status != 0) {
+		dev->irq = entry->msi_attrib.default_vector;
+		kmem_cache_free(msi_cachep, entry);
+		return status;
+	}
 
 	attach_msi_entry(entry, vector);
 	/* Set MSI enabled bits */
@@ -765,8 +809,10 @@ static int msix_capability_init(struct pci_dev *dev,
 				struct msix_entry *entries, int nvec)
 {
 	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
-	struct msg_address address;
-	struct msg_data data;
+	u32 address_hi;
+	u32 address_lo;
+	u32 data;
+	int status;
 	int vector, pos, i, j, nr_entries, temp = 0;
 	unsigned long phys_addr;
 	u32 table_offset;
@@ -822,18 +868,20 @@ static int msix_capability_init(struct pci_dev *dev,
 		/* Replace with MSI-X handler */
 		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
 		/* Configure MSI-X capability structure */
-		msi_address_init(&address);
-		msi_data_init(&data, vector);
-		entry->msi_attrib.current_cpu =
-			((address.lo_address.u.dest_id >>
-			MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
-		writel(address.lo_address.value,
+		status = msi_ops->setup(dev, vector,
+			&address_hi,
+			&address_lo,
+			&data);
+		if (status < 0)
+			break;
+
+		writel(address_lo,
 			base + j * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		writel(address.hi_address,
+		writel(address_hi,
 			base + j * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		writel(*(u32*)&data,
+		writel(data,
 			base + j * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_DATA_OFFSET);
 		attach_msi_entry(entry, vector);
@@ -865,6 +913,7 @@ static int msix_capability_init(struct pci_dev *dev,
  **/
 int pci_enable_msi(struct pci_dev* dev)
 {
+	struct pci_bus *bus;
 	int pos, temp, status = -EINVAL;
 	u16 control;
 
@@ -874,8 +923,9 @@ int pci_enable_msi(struct pci_dev* dev)
 	if (dev->no_msi)
 		return status;
 
-	if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
-		return -EINVAL;
+	for (bus = dev->bus; bus; bus = bus->parent)
+		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+			return -EINVAL;
 
 	temp = dev->irq;
 
@@ -887,23 +937,23 @@ int pci_enable_msi(struct pci_dev* dev)
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(dev, msi_control_reg(pos), &control);
-	if (control & PCI_MSI_FLAGS_ENABLE)
-		return 0;	/* Already in MSI mode */
-
 	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
 		/* Lookup Sucess */
 		unsigned long flags;
 
+		pci_read_config_word(dev, msi_control_reg(pos), &control);
+		if (control & PCI_MSI_FLAGS_ENABLE)
+			return 0;	/* Already in MSI mode */
 		spin_lock_irqsave(&msi_lock, flags);
 		if (!vector_irq[dev->irq]) {
 			msi_desc[dev->irq]->msi_attrib.state = 0;
 			vector_irq[dev->irq] = -1;
 			nr_released_vectors--;
 			spin_unlock_irqrestore(&msi_lock, flags);
-			msi_register_init(dev, msi_desc[dev->irq]);
-			enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
-			return 0;
+			status = msi_register_init(dev, msi_desc[dev->irq]);
+			if (status == 0)
+				enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+			return status;
 		}
 		spin_unlock_irqrestore(&msi_lock, flags);
 		dev->irq = temp;
@@ -980,6 +1030,8 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
 	void __iomem *base;
 	unsigned long flags;
 
+	msi_ops->teardown(vector);
+
 	spin_lock_irqsave(&msi_lock, flags);
 	entry = msi_desc[vector];
 	if (!entry || entry->dev != dev) {
@@ -1008,33 +1060,8 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
 			entry_nr * PCI_MSIX_ENTRY_SIZE +
 			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
 
-		if (head == vector) {
-			/*
-			 * Detect last MSI-X vector to be released.
-			 * Release the MSI-X memory-mapped table.
-			 */
-#if 0
-			int pos, nr_entries;
-			unsigned long phys_addr;
-			u32 table_offset;
-			u16 control;
-			u8 bir;
-
-			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-			pci_read_config_word(dev, msi_control_reg(pos),
-				&control);
-			nr_entries = multi_msix_capable(control);
-			pci_read_config_dword(dev, msix_table_offset_reg(pos),
-				&table_offset);
-			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
-			phys_addr = pci_resource_start(dev, bir) + table_offset;
-/*
- * FIXME! and what did you want to do with phys_addr?
- */
-#endif
+		if (head == vector)
 			iounmap(base);
-		}
 	}
 
 	return 0;
@@ -1108,6 +1135,7 @@ static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
  **/
 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 {
+	struct pci_bus *bus;
 	int status, pos, nr_entries, free_vectors;
 	int i, j, temp;
 	u16 control;
@@ -1116,6 +1144,13 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 	if (!pci_msi_enable || !dev || !entries)
 		return -EINVAL;
 
+	if (dev->no_msi)
+		return -EINVAL;
+
+	for (bus = dev->bus; bus; bus = bus->parent)
+		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+			return -EINVAL;
+
 	status = msi_init();
 	if (status < 0)
 		return status;
@@ -1300,24 +1335,6 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev)
 		}
 		msi_free_vector(dev, vector, 0);
 		if (warning) {
-			/* Force to release the MSI-X memory-mapped table */
-#if 0
-			unsigned long phys_addr;
-			u32 table_offset;
-			u16 control;
-			u8 bir;
-
-			pci_read_config_word(dev, msi_control_reg(pos),
-				&control);
-			pci_read_config_dword(dev, msix_table_offset_reg(pos),
-				&table_offset);
-			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
-			phys_addr = pci_resource_start(dev, bir) + table_offset;
-/*
- * FIXME! and what did you want to do with phys_addr?
- */
-#endif
 			iounmap(base);
 			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
 			       "called without free_irq() on all MSI-X vectors\n",
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 4ac52d441e47..56951c39d3a3 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,6 +6,68 @@
 #ifndef MSI_H
 #define MSI_H
 
+/*
+ * MSI operation vector.  Used by the msi core code (drivers/pci/msi.c)
+ * to abstract platform-specific tasks relating to MSI address generation
+ * and resource management.
+ */
+struct msi_ops {
+	/**
+	 * setup - generate an MSI bus address and data for a given vector
+	 * @pdev: PCI device context (in)
+	 * @vector: vector allocated by the msi core (in)
+	 * @addr_hi: upper 32 bits of PCI bus MSI address (out)
+	 * @addr_lo: lower 32 bits of PCI bus MSI address (out)
+	 * @data: MSI data payload (out)
+	 *
+	 * Description: The setup op is used to generate a PCI bus addres and
+	 * data which the msi core will program into the card MSI capability
+	 * registers.  The setup routine is responsible for picking an initial
+	 * cpu to target the MSI at.  The setup routine is responsible for
+	 * examining pdev to determine the MSI capabilities of the card and
+	 * generating a suitable address/data.  The setup routine is
+	 * responsible for allocating and tracking any system resources it
+	 * needs to route the MSI to the cpu it picks, and for associating
+	 * those resources with the passed in vector.
+	 *
+	 * Returns 0 if the MSI address/data was successfully setup.
+	 **/
+
+	int (*setup) (struct pci_dev *pdev, unsigned int vector,
+		      u32 *addr_hi, u32 *addr_lo, u32 *data);
+
+	/**
+	 * teardown - release resources allocated by setup
+	 * @vector: vector context for resources (in)
+	 *
+	 * Description:  The teardown op is used to release any resources
+	 * that were allocated in the setup routine associated with the passed
+	 * in vector.
+	 **/
+
+	void (*teardown) (unsigned int vector);
+
+	/**
+	 * target - retarget an MSI at a different cpu
+	 * @vector: vector context for resources (in)
+	 * @cpu: new cpu to direct vector at (in)
+	 * @addr_hi: new value of PCI bus upper 32 bits (in/out)
+	 * @addr_lo: new value of PCI bus lower 32 bits (in/out)
+	 *
+	 * Description: The target op is used to redirect an MSI vector
+	 * at a different cpu.  addr_hi/addr_lo coming in are the existing
+	 * values that the MSI core has programmed into the card.  The
+	 * target code is responsible for freeing any resources (if any)
+	 * associated with the old address, and generating a new PCI bus
+	 * addr_hi/addr_lo that will redirect the vector at the indicated cpu.
+	 **/
+
+	void (*target) (unsigned int vector, unsigned int cpu,
+			u32 *addr_hi, u32 *addr_lo);
+};
+
+extern int msi_register(struct msi_ops *ops);
+
 #include <asm/msi.h>
 
 /*
@@ -63,67 +125,6 @@ extern int pci_vector_resources(int last, int nr_released);
 #define msix_mask(address)		(address | PCI_MSIX_FLAGS_BITMASK)
 #define msix_is_pending(address)	(address & PCI_MSIX_FLAGS_PENDMASK)
 
-/*
- * MSI Defined Data Structures
- */
-#define MSI_ADDRESS_HEADER		0xfee
-#define MSI_ADDRESS_HEADER_SHIFT	12
-#define MSI_ADDRESS_HEADER_MASK		0xfff000
-#define MSI_ADDRESS_DEST_ID_MASK	0xfff0000f
-#define MSI_TARGET_CPU_MASK		0xff
-#define MSI_DELIVERY_MODE		0
-#define MSI_LEVEL_MODE			1	/* Edge always assert */
-#define MSI_TRIGGER_MODE		0	/* MSI is edge sensitive */
-#define MSI_PHYSICAL_MODE		0
-#define MSI_LOGICAL_MODE		1
-#define MSI_REDIRECTION_HINT_MODE	0
-
-struct msg_data {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u32	vector		:  8;
-	__u32	delivery_mode	:  3;	/* 000b: FIXED | 001b: lowest prior */
-	__u32	reserved_1	:  3;
-	__u32	level		:  1;	/* 0: deassert | 1: assert */
-	__u32	trigger		:  1;	/* 0: edge | 1: level */
-	__u32	reserved_2	: 16;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-	__u32	reserved_2	: 16;
-	__u32	trigger		:  1;	/* 0: edge | 1: level */
-	__u32	level		:  1;	/* 0: deassert | 1: assert */
-	__u32	reserved_1	:  3;
-	__u32	delivery_mode	:  3;	/* 000b: FIXED | 001b: lowest prior */
-	__u32	vector		:  8;
-#else
-#error "Bitfield endianness not defined! Check your byteorder.h"
-#endif
-} __attribute__ ((packed));
-
-struct msg_address {
-	union {
-		struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-			__u32	reserved_1	:  2;
-			__u32	dest_mode	:  1;	/*0:physic | 1:logic */
-			__u32	redirection_hint:  1;	/*0: dedicated CPU
-							  1: lowest priority */
-			__u32	reserved_2	:  4;
-			__u32	dest_id		: 24;	/* Destination ID */
-#elif defined(__BIG_ENDIAN_BITFIELD)
-			__u32	dest_id		: 24;	/* Destination ID */
-			__u32	reserved_2	:  4;
-			__u32	redirection_hint:  1;	/*0: dedicated CPU
-							  1: lowest priority */
-			__u32	dest_mode	:  1;	/*0:physic | 1:logic */
-			__u32	reserved_1	:  2;
-#else
-#error "Bitfield endianness not defined! Check your byteorder.h"
-#endif
-		}u;
-		__u32	value;
-	}lo_address;
-	__u32	hi_address;
-} __attribute__ ((packed));
-
 struct msi_desc {
 	struct {
 		__u8	type	: 5;	/* {0: unused, 5h:MSI, 11h:MSI-X} */
@@ -132,7 +133,7 @@ struct msi_desc {
 		__u8	reserved: 1;	/* reserved			  */
 		__u8	entry_nr;	/* specific enabled entry	  */
 		__u8	default_vector; /* default pre-assigned vector	  */
-		__u8	current_cpu;	/* current destination cpu	  */
+		__u8	unused;		/* formerly unused destination cpu*/
 	}msi_attrib;
 
 	struct {
@@ -142,6 +143,14 @@ struct msi_desc {
 
 	void __iomem *mask_base;
 	struct pci_dev *dev;
+
+#ifdef CONFIG_PM
+	/* PM save area for MSIX address/data */
+
+	u32	address_hi_save;
+	u32	address_lo_save;
+	u32	data_save;
+#endif
 };
 
 #endif /* MSI_H */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c2ecae5ff0c1..bb7456c1dbac 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -267,7 +267,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 
 
 /* ACPI bus type */
-static int pci_acpi_find_device(struct device *dev, acpi_handle *handle)
+static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
 {
 	struct pci_dev * pci_dev;
 	acpi_integer	addr;
@@ -281,7 +281,7 @@ static int pci_acpi_find_device(struct device *dev, acpi_handle *handle)
 	return 0;
 }
 
-static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle)
+static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
 {
 	int num;
 	unsigned int seg, bus;
@@ -299,21 +299,21 @@ static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle)
 	return 0;
 }
 
-static struct acpi_bus_type pci_acpi_bus = {
+static struct acpi_bus_type acpi_pci_bus = {
 	.bus = &pci_bus_type,
-	.find_device = pci_acpi_find_device,
-	.find_bridge = pci_acpi_find_root_bridge,
+	.find_device = acpi_pci_find_device,
+	.find_bridge = acpi_pci_find_root_bridge,
 };
 
-static int __init pci_acpi_init(void)
+static int __init acpi_pci_init(void)
 {
 	int ret;
 
-	ret = register_acpi_bus_type(&pci_acpi_bus);
+	ret = register_acpi_bus_type(&acpi_pci_bus);
 	if (ret)
 		return 0;
 	platform_pci_choose_state = acpi_pci_choose_state;
 	platform_pci_set_power_state = acpi_pci_set_power_state;
 	return 0;
 }
-arch_initcall(pci_acpi_init);
+arch_initcall(acpi_pci_init);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 56ac2bc003c7..bc405c035ce3 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -43,6 +43,29 @@ pci_config_attr(subsystem_vendor, "0x%04x\n");
 pci_config_attr(subsystem_device, "0x%04x\n");
 pci_config_attr(class, "0x%06x\n");
 pci_config_attr(irq, "%u\n");
+pci_config_attr(is_enabled, "%u\n");
+
+static ssize_t broken_parity_status_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	return sprintf (buf, "%u\n", pdev->broken_parity_status);
+}
+
+static ssize_t broken_parity_status_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	ssize_t consumed = -EINVAL;
+
+	if ((count > 0) && (*buf == '0' || *buf == '1')) {
+		pdev->broken_parity_status = *buf == '1' ? 1 : 0;
+		consumed = count;
+	}
+	return consumed;
+}
 
 static ssize_t local_cpus_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
@@ -90,6 +113,25 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
 		       (u8)(pci_dev->class));
 }
+static ssize_t
+is_enabled_store(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	/* this can crash the machine when done on the "wrong" device */
+	if (!capable(CAP_SYS_ADMIN))
+		return count;
+
+	if (*buf == '0')
+		pci_disable_device(pdev);
+
+	if (*buf == '1')
+		pci_enable_device(pdev);
+
+	return count;
+}
+
 
 struct device_attribute pci_dev_attrs[] = {
 	__ATTR_RO(resource),
@@ -101,6 +143,9 @@ struct device_attribute pci_dev_attrs[] = {
 	__ATTR_RO(irq),
 	__ATTR_RO(local_cpus),
 	__ATTR_RO(modalias),
+	__ATTR(enable, 0600, is_enabled_show, is_enabled_store),
+	__ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
+		broken_parity_status_show,broken_parity_status_store),
 	__ATTR_NULL,
 };
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fde41cc14734..d408a3c30426 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -517,7 +517,12 @@ pci_enable_device_bars(struct pci_dev *dev, int bars)
 int
 pci_enable_device(struct pci_dev *dev)
 {
-	int err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
+	int err;
+
+	if (dev->is_enabled)
+		return 0;
+
+	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
 	if (err)
 		return err;
 	pci_fixup_device(pci_fixup_enable, dev);
@@ -546,7 +551,14 @@ void
 pci_disable_device(struct pci_dev *dev)
 {
 	u16 pci_command;
 
+	if (dev->msi_enabled)
+		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+			PCI_CAP_ID_MSI);
+	if (dev->msix_enabled)
+		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+			PCI_CAP_ID_MSIX);
+
 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
 	if (pci_command & PCI_COMMAND_MASTER) {
 		pci_command &= ~PCI_COMMAND_MASTER;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 30630cbe2fe3..29bdeca031a8 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -40,7 +40,7 @@ extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int
 extern void pci_remove_legacy_files(struct pci_bus *bus);
 
 /* Lock for read/write access to pci device and bus lists */
-extern spinlock_t pci_bus_lock;
+extern struct rw_semaphore pci_bus_sem;
 
 #ifdef CONFIG_X86_IO_APIC
 extern int pci_msi_quirk;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a10ed9dab2c2..f89dbc3738b7 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -180,25 +180,31 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
 		res->flags |= pci_calc_resource_flags(l);
 		if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
 		    == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
-			pci_read_config_dword(dev, reg+4, &l);
+			u32 szhi, lhi;
+			pci_read_config_dword(dev, reg+4, &lhi);
+			pci_write_config_dword(dev, reg+4, ~0);
+			pci_read_config_dword(dev, reg+4, &szhi);
+			pci_write_config_dword(dev, reg+4, lhi);
+			szhi = pci_size(lhi, szhi, 0xffffffff);
 			next++;
 #if BITS_PER_LONG == 64
-			res->start |= ((unsigned long) l) << 32;
+			res->start |= ((unsigned long) lhi) << 32;
 			res->end = res->start + sz;
-			pci_write_config_dword(dev, reg+4, ~0);
-			pci_read_config_dword(dev, reg+4, &sz);
-			pci_write_config_dword(dev, reg+4, l);
-			sz = pci_size(l, sz, 0xffffffff);
-			if (sz) {
+			if (szhi) {
 				/* This BAR needs > 4GB? Wow. */
-				res->end |= (unsigned long)sz<<32;
+				res->end |= (unsigned long)szhi<<32;
 			}
 #else
-			if (l) {
-				printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", pci_name(dev));
+			if (szhi) {
+				printk(KERN_ERR "PCI: Unable to handle 64-bit BAR for device %s\n", pci_name(dev));
 				res->start = 0;
 				res->flags = 0;
-				continue;
+			} else if (lhi) {
+				/* 64-bit wide address, treat as disabled */
+				pci_write_config_dword(dev, reg, l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
+				pci_write_config_dword(dev, reg+4, 0);
+				res->start = 0;
+				res->end = sz;
 			}
 #endif
 		}
@@ -377,9 +383,9 @@ struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_de
 
 	child = pci_alloc_child_bus(parent, dev, busnr);
 	if (child) {
-		spin_lock(&pci_bus_lock);
+		down_write(&pci_bus_sem);
 		list_add_tail(&child->node, &parent->children);
-		spin_unlock(&pci_bus_lock);
+		up_write(&pci_bus_sem);
 	}
 	return child;
 }
@@ -838,9 +844,9 @@ void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 	 * and the bus list for fixup functions, etc.
 	 */
 	INIT_LIST_HEAD(&dev->global_list);
-	spin_lock(&pci_bus_lock);
+	down_write(&pci_bus_sem);
 	list_add_tail(&dev->bus_list, &bus->devices);
-	spin_unlock(&pci_bus_lock);
+	up_write(&pci_bus_sem);
 }
 
 struct pci_dev * __devinit
@@ -975,9 +981,10 @@ struct pci_bus * __devinit pci_create_bus(struct device *parent,
 		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
 		goto err_out;
 	}
-	spin_lock(&pci_bus_lock);
+
+	down_write(&pci_bus_sem);
 	list_add_tail(&b->node, &pci_root_buses);
-	spin_unlock(&pci_bus_lock);
+	up_write(&pci_bus_sem);
 
 	memset(dev, 0, sizeof(*dev));
@@ -1017,9 +1024,9 @@ class_dev_create_file_err:
 class_dev_reg_err:
 	device_unregister(dev);
 dev_reg_err:
-	spin_lock(&pci_bus_lock);
+	down_write(&pci_bus_sem);
 	list_del(&b->node);
-	spin_unlock(&pci_bus_lock);
+	up_write(&pci_bus_sem);
 err_out:
 	kfree(dev);
 	kfree(b);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d378478612fb..4364d793f73b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,17 @@
 #include <linux/acpi.h>
 #include "pci.h"
 
+/* The Mellanox Tavor device gives false positive parity errors
+ * Mark this device with a broken_parity_status, to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+ */
+static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
+{
+	dev->broken_parity_status = 1;	/* This device gives false positives */
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
+
 /* Deal with broken BIOS'es that neglect to enable passive release,
    which can cause problems in combination with the 82441FX/PPro MTRRs */
 static void __devinit quirk_passive_release(struct pci_dev *dev)
@@ -878,27 +889,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
  * when a PCI-Soundcard is added. The BIOS only gives Options
  * "Disabled" and "AUTO". This Quirk Sets the corresponding
  * Register-Value to enable the Soundcard.
+ *
+ * FIXME: Presently this quirk will run on anything that has an 8237
+ * which isn't correct, we need to check DMI tables or something in
+ * order to make sure it only runs on the MSI-K8T-Neo2Fir. Because it
+ * runs everywhere at present we suppress the printk output in most
+ * irrelevant cases.
  */
 static void __init k8t_sound_hostbridge(struct pci_dev *dev)
 {
 	unsigned char val;
 
-	printk(KERN_INFO "PCI: Quirk-MSI-K8T Soundcard On\n");
 	pci_read_config_byte(dev, 0x50, &val);
 	if (val == 0x88 || val == 0xc8) {
+		/* Assume it's probably a MSI-K8T-Neo2Fir */
+		printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, attempting to turn soundcard ON\n");
 		pci_write_config_byte(dev, 0x50, val & (~0x40));
 
 		/* Verify the Change for Status output */
 		pci_read_config_byte(dev, 0x50, &val);
 		if (val & 0x40)
-			printk(KERN_INFO "PCI: MSI-K8T soundcard still off\n");
+			printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard still off\n");
 		else
-			printk(KERN_INFO "PCI: MSI-K8T soundcard on\n");
-	} else {
-		printk(KERN_INFO "PCI: Unexpected Value in PCI-Register: "
-			"no Change!\n");
+			printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard on\n");
 	}
-
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
 
@@ -1485,6 +1499,25 @@ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
 
+/* Under some circumstances, AER is not linked with extended capabilities.
+ * Force it to be linked by setting the corresponding control bit in the
+ * config space.
+ */
+static void __devinit quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
+{
+	uint8_t b;
+	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
+		if (!(b & 0x20)) {
+			pci_write_config_byte(dev, 0xf41, b | 0x20);
+			printk(KERN_INFO
+				"PCI: Linking AER extended capability on %s\n",
+				pci_name(dev));
+		}
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
+			quirk_nvidia_ck804_pcie_aer_ext_cap);
+
 EXPORT_SYMBOL(pcie_mch_quirk);
 #ifdef CONFIG_HOTPLUG
 EXPORT_SYMBOL(pci_fixup_device);
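(For orientation only, not part of the patch: the three additions above, the Mellanox Tavor blacklist, the MSI-K8T soundcard enable and the CK804 AER fix, all use the kernel's PCI fixup mechanism, in which a hook taking a struct pci_dev is registered with a DECLARE_PCI_FIXUP_* macro and is called for every device matching the given vendor/device pair. A minimal sketch of that shape, with a made-up vendor/device ID:

#include <linux/init.h>
#include <linux/pci.h>

/* Illustrative sketch only: 0x1234/0x5678 is a made-up vendor/device pair,
 * not a real piece of hardware. */
static void __devinit quirk_example(struct pci_dev *dev)
{
	/* Runs once per matching device during the "final" fixup pass. */
	printk(KERN_INFO "PCI: example quirk applied to %s\n", pci_name(dev));
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);

The other DECLARE_PCI_FIXUP_* variants hook earlier or later phases of device setup; FINAL, as used above, runs after the device's resources have been read.)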
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 1a6bf9de166f..99ffbd478b29 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -22,18 +22,18 @@ static void pci_destroy_dev(struct pci_dev *dev)
 		pci_proc_detach_device(dev);
 		pci_remove_sysfs_dev_files(dev);
 		device_unregister(&dev->dev);
-		spin_lock(&pci_bus_lock);
+		down_write(&pci_bus_sem);
 		list_del(&dev->global_list);
 		dev->global_list.next = dev->global_list.prev = NULL;
-		spin_unlock(&pci_bus_lock);
+		up_write(&pci_bus_sem);
 	}
 
 	/* Remove the device from the device lists, and prevent any further
 	 * list accesses from this device */
-	spin_lock(&pci_bus_lock);
+	down_write(&pci_bus_sem);
 	list_del(&dev->bus_list);
 	dev->bus_list.next = dev->bus_list.prev = NULL;
-	spin_unlock(&pci_bus_lock);
+	up_write(&pci_bus_sem);
 
 	pci_free_resources(dev);
 	pci_dev_put(dev);
@@ -62,9 +62,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
 {
 	pci_proc_detach_bus(pci_bus);
 
-	spin_lock(&pci_bus_lock);
+	down_write(&pci_bus_sem);
 	list_del(&pci_bus->node);
-	spin_unlock(&pci_bus_lock);
+	up_write(&pci_bus_sem);
 	pci_remove_legacy_files(pci_bus);
 	class_device_remove_file(&pci_bus->class_dev,
 			&class_device_attr_cpuaffinity);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index ce7dd6e7be60..622b3f8ba820 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -13,7 +13,7 @@
 #include <linux/interrupt.h>
 #include "pci.h"
 
-DEFINE_SPINLOCK(pci_bus_lock);
+DECLARE_RWSEM(pci_bus_sem);
 
 static struct pci_bus * __devinit
 pci_do_find_bus(struct pci_bus* bus, unsigned char busnr)
@@ -72,11 +72,11 @@ pci_find_next_bus(const struct pci_bus *from)
 	struct pci_bus *b = NULL;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	n = from ? from->node.next : pci_root_buses.next;
 	if (n != &pci_root_buses)
 		b = pci_bus_b(n);
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	return b;
 }
 
@@ -124,7 +124,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
 	struct pci_dev *dev;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 
 	list_for_each(tmp, &bus->devices) {
 		dev = pci_dev_b(tmp);
@@ -135,7 +135,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
 	dev = NULL;
  out:
 	pci_dev_get(dev);
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	return dev;
 }
 
@@ -167,7 +167,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
 	struct pci_dev *dev;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	n = from ? from->global_list.next : pci_devices.next;
 
 	while (n && (n != &pci_devices)) {
@@ -181,7 +181,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
 	}
 	dev = NULL;
 exit:
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	return dev;
 }
 
@@ -232,7 +232,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
 	struct pci_dev *dev;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	n = from ? from->global_list.next : pci_devices.next;
 
 	while (n && (n != &pci_devices)) {
@@ -247,7 +247,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
 	dev = NULL;
 exit:
 	dev = pci_dev_get(dev);
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	pci_dev_put(from);
 	return dev;
 }
@@ -292,7 +292,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p
 	struct pci_dev *dev;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	n = from ? from->global_list.prev : pci_devices.prev;
 
 	while (n && (n != &pci_devices)) {
@@ -304,7 +304,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p
 	}
 	dev = NULL;
 exit:
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	return dev;
 }
 
@@ -328,7 +328,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
 	struct pci_dev *dev;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	n = from ? from->global_list.next : pci_devices.next;
 
 	while (n && (n != &pci_devices)) {
@@ -340,7 +340,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
 	dev = NULL;
 exit:
 	dev = pci_dev_get(dev);
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	pci_dev_put(from);
 	return dev;
 }
@@ -362,7 +362,7 @@ int pci_dev_present(const struct pci_device_id *ids)
 	int found = 0;
 
 	WARN_ON(in_interrupt());
-	spin_lock(&pci_bus_lock);
+	down_read(&pci_bus_sem);
 	while (ids->vendor || ids->subvendor || ids->class_mask) {
 		list_for_each_entry(dev, &pci_devices, global_list) {
 			if (pci_match_one_device(ids, dev)) {
@@ -372,8 +372,8 @@ int pci_dev_present(const struct pci_device_id *ids)
 		}
 		ids++;
 	}
 exit:
-	spin_unlock(&pci_bus_lock);
+	up_read(&pci_bus_sem);
 	return found;
 }
 EXPORT_SYMBOL(pci_dev_present);
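(Not part of the patch, but the pattern the whole series converts to is worth spelling out: pci_bus_sem is a read/write semaphore, so the search helpers above can walk the bus and device lists concurrently under down_read(), while the add/remove paths in bus.c, probe.c and remove.c take down_write() to mutate them. A self-contained sketch of that idiom, using made-up names (example_sem, example_list, struct my_item):

#include <linux/list.h>
#include <linux/rwsem.h>

struct my_item {
	struct list_head node;
	int id;
};

static DECLARE_RWSEM(example_sem);	/* plays the role of pci_bus_sem */
static LIST_HEAD(example_list);

/* Lookups only need the read side, so several can run concurrently. */
static struct my_item *example_find(int id)
{
	struct my_item *item, *found = NULL;

	down_read(&example_sem);
	list_for_each_entry(item, &example_list, node) {
		if (item->id == id) {
			found = item;
			break;
		}
	}
	up_read(&example_sem);
	return found;
}

/* List mutation takes the write side, excluding both readers and writers. */
static void example_add(struct my_item *item)
{
	down_write(&example_sem);
	list_add_tail(&item->node, &example_list);
	up_write(&example_sem);
}

The trade-off is the usual reader/writer one: searches no longer serialize against each other, at the cost of slightly heavier lock operations than a plain spinlock.)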
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 28ce3a7ee434..35086e80faa9 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -55,9 +55,10 @@ pbus_assign_resources_sorted(struct pci_bus *bus)
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		u16 class = dev->class >> 8;
 
-		/* Don't touch classless devices and host bridges. */
+		/* Don't touch classless devices or host bridges or ioapics. */
 		if (class == PCI_CLASS_NOT_DEFINED ||
-		    class == PCI_CLASS_BRIDGE_HOST)
+		    class == PCI_CLASS_BRIDGE_HOST ||
+		    class == PCI_CLASS_SYSTEM_PIC)
 			continue;
 
 		pdev_sort_resources(dev, &head);
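(Aside, not from the patch: dev->class stores the 24-bit class code (base class, sub-class, programming interface), so the >> 8 above leaves the 16-bit base/sub-class value that constants such as PCI_CLASS_BRIDGE_HOST (0x0600) and PCI_CLASS_SYSTEM_PIC (0x0800) encode; the new test therefore also skips IOAPIC-style interrupt controllers. The same check in isolation, as a hypothetical helper:

#include <linux/pci.h>

/* Hypothetical helper mirroring the test in pbus_assign_resources_sorted(). */
static int example_skip_resource_sorting(const struct pci_dev *dev)
{
	u16 class = dev->class >> 8;	/* drop the prog-if byte */

	return class == PCI_CLASS_NOT_DEFINED ||
	       class == PCI_CLASS_BRIDGE_HOST ||
	       class == PCI_CLASS_SYSTEM_PIC;
}
)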
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index ea9277b7f899..577f4b55c46d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -155,6 +155,46 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	return ret;
 }
 
+#ifdef CONFIG_EMBEDDED
+int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
+{
+	struct pci_bus *bus = dev->bus;
+	struct resource *res = dev->resource + resno;
+	unsigned int type_mask;
+	int i, ret = -EBUSY;
+
+	type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+
+	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+		struct resource *r = bus->resource[i];
+		if (!r)
+			continue;
+
+		/* type_mask must match */
+		if ((res->flags ^ r->flags) & type_mask)
+			continue;
+
+		ret = request_resource(r, res);
+
+		if (ret == 0)
+			break;
+	}
+
+	if (ret) {
+		printk(KERN_ERR "PCI: Failed to allocate %s resource "
+			"#%d:%llx@%llx for %s\n",
+			res->flags & IORESOURCE_IO ? "I/O" : "mem",
+			resno, (unsigned long long)(res->end - res->start + 1),
+			(unsigned long long)res->start, pci_name(dev));
+	} else if (resno < PCI_BRIDGE_RESOURCES) {
+		pci_update_resource(dev, res, resno);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_assign_resource_fixed);
+#endif
+
 /* Sort resources by alignment */
 void __devinit
 pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
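(Not part of the patch: a guess at how an embedded platform driver might call the new helper. pci_assign_resource_fixed() claims the BAR at whatever address the firmware already programmed, rather than letting the kernel relocate it, and is only built when CONFIG_EMBEDDED is set. The function name and BAR number below are hypothetical.

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical probe fragment for a platform where BAR 0 must stay where
 * the boot firmware put it (CONFIG_EMBEDDED kernels only). */
static int __devinit example_claim_fixed_bar(struct pci_dev *dev)
{
	int err;

	/* Request BAR 0 as-is; on failure (typically -EBUSY because the
	 * range is already claimed) give up rather than move the BAR. */
	err = pci_assign_resource_fixed(dev, 0);
	if (err)
		printk(KERN_ERR "example: cannot claim BAR 0 of %s\n",
		       pci_name(dev));
	return err;
}
)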
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 77bb2351500c..680f6063954b 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -397,30 +397,6 @@
 #include "ql1280_fw.h"
 #include "ql1040_fw.h"
 
-
-/*
- * Missing PCI ID's
- */
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP1080
-#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP1240
-#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP1280
-#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP10160
-#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
-#endif
-#ifndef PCI_DEVICE_ID_QLOGIC_ISP12160
-#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
-#endif
-
-#ifndef PCI_VENDOR_ID_AMI
-#define PCI_VENDOR_ID_AMI 0x101e
-#endif
-
 #ifndef BITS_PER_LONG
 #error "BITS_PER_LONG not defined!"
 #endif
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 8a29ce340b47..27d658704cf9 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -433,13 +433,14 @@ err_out:
 
 
 /*
- * 0x1725/0x7174 is the Vitesse VSC-7174
- * 0x8086/0x3200 is the Intel 31244, which is supposed to be identical
- * compatibility is untested as of yet
+ * Intel 31244 is supposed to be identical.
+ * Compatibility is untested as of yet.
  */
 static const struct pci_device_id vsc_sata_pci_tbl[] = {
-	{ 0x1725, 0x7174, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
-	{ 0x8086, 0x3200, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+	{ PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
+	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244,
+	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
 	{ }
 };
 
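(Not from the patch: for reference, each pci_device_id entry above matches on vendor and device, leaves subvendor/subdevice as PCI_ANY_ID, and additionally requires the 24-bit class code to equal 0x010600 (mass storage, SATA) under the 0xFFFFFF mask. A sketch of how such a table is normally tied to a driver; every name below is made up and none of it exists in the tree.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

/* vendor, device, subvendor, subdevice, class, class_mask, driver_data */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
	{ }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int __devinit example_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	/* A real probe would map BARs and register with its subsystem. */
	return pci_enable_device(pdev);
}

static void __devexit example_remove_one(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_init_one,
	.remove		= __devexit_p(example_remove_one),
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
)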