Diffstat (limited to 'arch/x86/pci')

 -rw-r--r--  arch/x86/pci/acpi.c  |  7
 -rw-r--r--  arch/x86/pci/fixup.c | 12
 -rw-r--r--  arch/x86/pci/i386.c  | 85
 -rw-r--r--  arch/x86/pci/mrst.c  | 40
 -rw-r--r--  arch/x86/pci/xen.c   | 27

 5 files changed, 160 insertions, 11 deletions
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 49a5cb55429b..ed2835e148b5 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -416,7 +416,12 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
 		kfree(sd);
 	} else {
 		get_current_resources(device, busnum, domain, &resources);
-		if (list_empty(&resources))
+
+		/*
+		 * _CRS with no apertures is normal, so only fall back to
+		 * defaults or native bridge info if we're ignoring _CRS.
+		 */
+		if (!pci_use_crs)
 			x86_pci_root_bus_resources(busnum, &resources);
 		bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd,
 					  &resources);
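
The pci_use_crs flag tested above is the one behind the pci=use_crs / pci=nocrs boot parameters (plus a few DMI quirks), so the old behaviour of filling in default or native host bridge windows is still reachable explicitly rather than being triggered by an empty _CRS. For example (illustrative command line, not part of this patch):

	# ignore ACPI _CRS host bridge windows; fall back to defaults/native info
	linux ... pci=nocrs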
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6dd89555fbfa..d0e6e403b4f6 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -164,11 +164,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_
  */
 static void __devinit pci_fixup_transparent_bridge(struct pci_dev *dev)
 {
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
-	    (dev->device & 0xff00) == 0x2400)
+	if ((dev->device & 0xff00) == 0x2400)
 		dev->transparent = 1;
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixup_transparent_bridge);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+				PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
 
 /*
  * Fixup for C1 Halt Disconnect problem on nForce2 systems.
@@ -322,9 +322,6 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
 	struct pci_bus *bus;
 	u16 config;
 
-	if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-		return;
-
 	/* Is VGA routed to us? */
 	bus = pdev->bus;
 	while (bus) {
@@ -353,7 +350,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
 		dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
 	}
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+				PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
 
 
 static const struct dmi_system_id __devinitconst msi_k8t_dmi_table[] = {
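
Both hunks move the class check out of the hook and into the registration: the DECLARE_PCI_FIXUP_CLASS_* variants take a class value and a shift, and the fixup core compares (dev->class >> shift) against it before calling the hook, so with a shift of 8 the match is on base class + subclass. A hedged sketch of the pattern with a made-up quirk name:

/* Sketch only: hypothetical quirk restricted to PCI-to-PCI bridges.
 * The class filter replaces an explicit class test inside the hook. */
static void __devinit quirk_example_bridge(struct pci_dev *dev)
{
	dev->transparent = 1;	/* whatever the quirk needs to do */
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, quirk_example_bridge);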
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 91821a1a0c3a..831971e731f7 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -39,6 +39,87 @@
 #include <asm/io_apic.h>
 
 
+/*
+ * This list of dynamic mappings is for temporarily maintaining
+ * original BIOS BAR addresses for possible reinstatement.
+ */
+struct pcibios_fwaddrmap {
+	struct list_head list;
+	struct pci_dev *dev;
+	resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];
+};
+
+static LIST_HEAD(pcibios_fwaddrmappings);
+static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
+
+/* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
+static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
+{
+	struct pcibios_fwaddrmap *map;
+
+	WARN_ON(!spin_is_locked(&pcibios_fwaddrmap_lock));
+
+	list_for_each_entry(map, &pcibios_fwaddrmappings, list)
+		if (map->dev == dev)
+			return map;
+
+	return NULL;
+}
+
+static void
+pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
+{
+	unsigned long flags;
+	struct pcibios_fwaddrmap *map;
+
+	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
+	map = pcibios_fwaddrmap_lookup(dev);
+	if (!map) {
+		spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map)
+			return;
+
+		map->dev = pci_dev_get(dev);
+		map->fw_addr[idx] = fw_addr;
+		INIT_LIST_HEAD(&map->list);
+
+		spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
+		list_add_tail(&map->list, &pcibios_fwaddrmappings);
+	} else
+		map->fw_addr[idx] = fw_addr;
+	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
+}
+
+resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
+{
+	unsigned long flags;
+	struct pcibios_fwaddrmap *map;
+	resource_size_t fw_addr = 0;
+
+	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
+	map = pcibios_fwaddrmap_lookup(dev);
+	if (map)
+		fw_addr = map->fw_addr[idx];
+	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
+
+	return fw_addr;
+}
+
+static void pcibios_fw_addr_list_del(void)
+{
+	unsigned long flags;
+	struct pcibios_fwaddrmap *entry, *next;
+
+	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
+	list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
+		list_del(&entry->list);
+		pci_dev_put(entry->dev);
+		kfree(entry);
+	}
+	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
+}
+
 static int
 skip_isa_ioresource_align(struct pci_dev *dev) {
 
@@ -182,7 +263,8 @@ static void __init pcibios_allocate_resources(int pass)
 					idx, r, disabled, pass);
 				if (pci_claim_resource(dev, idx) < 0) {
 					/* We'll assign a new address later */
-					dev->fw_addr[idx] = r->start;
+					pcibios_save_fw_addr(dev,
+							idx, r->start);
 					r->end -= r->start;
 					r->start = 0;
 				}
@@ -228,6 +310,7 @@ static int __init pcibios_assign_resources(void)
 	}
 
 	pci_assign_unassigned_resources();
+	pcibios_fw_addr_list_del();
 
 	return 0;
 }
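
The list replaces the old per-device fw_addr[] array: when claiming a firmware-programmed BAR fails, its original address is stashed via pcibios_save_fw_addr() and can be looked up again while resources are being reassigned, after which pcibios_fw_addr_list_del() drops the bookkeeping. A minimal sketch of a consumer (try_firmware_address() is a made-up name; the real caller lives in the generic resource-assignment code):

/* Sketch, not kernel code: retry a BAR at the address the BIOS had
 * programmed before giving up on it entirely. */
static int try_firmware_address(struct pci_dev *dev, int idx)
{
	struct resource *res = &dev->resource[idx];
	resource_size_t size = resource_size(res);
	resource_size_t fw_addr = pcibios_retrieve_fw_addr(dev, idx);

	if (!fw_addr)
		return -ENOMEM;		/* nothing was saved for this BAR */

	res->start = fw_addr;
	res->end = res->start + size - 1;
	return pci_claim_resource(dev, idx);
}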
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index cb29191cee58..140942f66b31 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -43,6 +43,8 @@
 #define PCI_FIXED_BAR_4_SIZE	0x14
 #define PCI_FIXED_BAR_5_SIZE	0x1c
 
+static int pci_soc_mode = 0;
+
 /**
  * fixed_bar_cap - return the offset of the fixed BAR cap if found
  * @bus: PCI bus
@@ -148,7 +150,9 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 	 */
 	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
 		return 0;
-	if (bus == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(0, 0)))
+	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+				|| devfn == PCI_DEVFN(0, 0)
+				|| devfn == PCI_DEVFN(3, 0)))
 		return 1;
 	return 0; /* langwell on others */
 }
@@ -231,14 +235,43 @@ struct pci_ops pci_mrst_ops = {
  */
 int __init pci_mrst_init(void)
 {
-	printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
+	printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
 	pci_mmcfg_late_init();
 	pcibios_enable_irq = mrst_pci_irq_enable;
 	pci_root_ops = pci_mrst_ops;
+	pci_soc_mode = 1;
 	/* Continue with standard init */
 	return 1;
 }
 
+/* Langwell devices are not true pci devices, they are not subject to 10 ms
+ * d3 to d0 delay required by pci spec.
+ */
+static void __devinit pci_d3delay_fixup(struct pci_dev *dev)
+{
+	/* PCI fixups are effectively decided compile time. If we have a dual
+	   SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */
+	if (!pci_soc_mode)
+		return;
+	/* true pci devices in lincroft should allow type 1 access, the rest
+	 * are langwell fake pci devices.
+	 */
+	if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
+		return;
+	dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
+
+static void __devinit mrst_power_off_unused_dev(struct pci_dev *dev)
+{
+	pci_set_power_state(dev, PCI_D3cold);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
+
 /*
  * Langwell devices reside at fixed offsets, don't try to move them.
  */
@@ -248,6 +281,9 @@ static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
 	u32 size;
 	int i;
 
+	if (!pci_soc_mode)
+		return;
+
 	/* Must have extended configuration space */
 	if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
 		return;
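
The vendor-wide fixups here have to test pci_soc_mode at runtime because DECLARE_PCI_FIXUP_* registrations are compiled in unconditionally: on a dual SoC/non-SoC kernel, a fixup matching PCI_VENDOR_ID_INTEL/PCI_ANY_ID would also fire on ordinary PCs unless it checks that pci_mrst_init() actually ran (the power-off quirks can skip the check because they match SoC-only device IDs). The pattern, with a hypothetical quirk name:

/* Sketch: a Langwell/Medfield-only quirk should bail out early when
 * the MID PCI setup never ran on this machine. */
static void __devinit example_soc_only_quirk(struct pci_dev *dev)
{
	if (!pci_soc_mode)	/* set only by pci_mrst_init() */
		return;
	/* ... SoC-specific tweak, e.g. dev->d3_delay = 0; ... */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, example_soc_only_quirk);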
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index d99346ea8fdb..7415aa927913 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -324,6 +324,32 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 out:
 	return ret;
 }
+
+static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+	int ret = 0;
+
+	if (pci_seg_supported) {
+		struct physdev_pci_device restore_ext;
+
+		restore_ext.seg = pci_domain_nr(dev->bus);
+		restore_ext.bus = dev->bus->number;
+		restore_ext.devfn = dev->devfn;
+		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
+					&restore_ext);
+		if (ret == -ENOSYS)
+			pci_seg_supported = false;
+		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
+	}
+	if (!pci_seg_supported) {
+		struct physdev_restore_msi restore;
+
+		restore.bus = dev->bus->number;
+		restore.devfn = dev->devfn;
+		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
+		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
+	}
+}
 #endif
 
 static void xen_teardown_msi_irqs(struct pci_dev *dev)
@@ -446,6 +472,7 @@ int __init pci_xen_initial_domain(void)
 #ifdef CONFIG_PCI_MSI
 	x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
 	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+	x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
 #endif
 	xen_setup_acpi_sci();
 	__acpi_register_gsi = acpi_register_gsi_xen;
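
The new assignment matters on the resume path: when MSI state is restored for a device, the generic code ends up calling through the x86_msi ops, and in the initial domain that now reissues a restore hypercall so Xen reprograms the MSI bindings it owns. A rough sketch of the dispatch this enables (the wrapper name is illustrative, not the literal upstream plumbing):

/* Sketch of the call direction only. */
static void example_restore_msi_irqs(struct pci_dev *dev, int irq)
{
	if (x86_msi.restore_msi_irqs)
		x86_msi.restore_msi_irqs(dev, irq);
	/* dom0 with this patch: xen_initdom_restore_msi_irqs(dev, irq) */
}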