Diffstat (limited to 'arch/x86/pci')
 arch/x86/pci/Makefile          |   2
 arch/x86/pci/acpi.c            | 103
 arch/x86/pci/amd_bus.c         |  35
 arch/x86/pci/broadcom_bus.c    |  11
 arch/x86/pci/ce4100.c          | 316
 arch/x86/pci/common.c          |  58
 arch/x86/pci/direct.c          |  17
 arch/x86/pci/i386.c            |   9
 arch/x86/pci/irq.c             |  21
 arch/x86/pci/mmconfig-shared.c |  14
 arch/x86/pci/olpc.c            |   2
 arch/x86/pci/pcbios.c          |  23
 arch/x86/pci/xen.c             | 571
 13 files changed, 1120 insertions(+), 62 deletions(-)
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index a0207a7fdf39..6b8759f7634e 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -4,8 +4,10 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o | |||
4 | obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o | 4 | obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o |
5 | obj-$(CONFIG_PCI_DIRECT) += direct.o | 5 | obj-$(CONFIG_PCI_DIRECT) += direct.o |
6 | obj-$(CONFIG_PCI_OLPC) += olpc.o | 6 | obj-$(CONFIG_PCI_OLPC) += olpc.o |
7 | obj-$(CONFIG_PCI_XEN) += xen.o | ||
7 | 8 | ||
8 | obj-y += fixup.o | 9 | obj-y += fixup.o |
10 | obj-$(CONFIG_X86_INTEL_CE) += ce4100.o | ||
9 | obj-$(CONFIG_ACPI) += acpi.o | 11 | obj-$(CONFIG_ACPI) += acpi.o |
10 | obj-y += legacy.o irq.o | 12 | obj-y += legacy.o irq.o |
11 | 13 | ||
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 15466c096ba5..68c3c1395202 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
138 | struct acpi_resource_address64 addr; | 138 | struct acpi_resource_address64 addr; |
139 | acpi_status status; | 139 | acpi_status status; |
140 | unsigned long flags; | 140 | unsigned long flags; |
141 | struct resource *root, *conflict; | ||
142 | u64 start, end; | 141 | u64 start, end; |
143 | 142 | ||
144 | status = resource_to_addr(acpi_res, &addr); | 143 | status = resource_to_addr(acpi_res, &addr); |
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
146 | return AE_OK; | 145 | return AE_OK; |
147 | 146 | ||
148 | if (addr.resource_type == ACPI_MEMORY_RANGE) { | 147 | if (addr.resource_type == ACPI_MEMORY_RANGE) { |
149 | root = &iomem_resource; | ||
150 | flags = IORESOURCE_MEM; | 148 | flags = IORESOURCE_MEM; |
151 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) | 149 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) |
152 | flags |= IORESOURCE_PREFETCH; | 150 | flags |= IORESOURCE_PREFETCH; |
153 | } else if (addr.resource_type == ACPI_IO_RANGE) { | 151 | } else if (addr.resource_type == ACPI_IO_RANGE) { |
154 | root = &ioport_resource; | ||
155 | flags = IORESOURCE_IO; | 152 | flags = IORESOURCE_IO; |
156 | } else | 153 | } else |
157 | return AE_OK; | 154 | return AE_OK; |
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
172 | return AE_OK; | 169 | return AE_OK; |
173 | } | 170 | } |
174 | 171 | ||
175 | conflict = insert_resource_conflict(root, res); | 172 | info->res_num++; |
176 | if (conflict) { | 173 | if (addr.translation_offset) |
177 | dev_err(&info->bridge->dev, | 174 | dev_info(&info->bridge->dev, "host bridge window %pR " |
178 | "address space collision: host bridge window %pR " | 175 | "(PCI address [%#llx-%#llx])\n", |
179 | "conflicts with %s %pR\n", | 176 | res, res->start - addr.translation_offset, |
180 | res, conflict->name, conflict); | 177 | res->end - addr.translation_offset); |
181 | } else { | 178 | else |
182 | pci_bus_add_resource(info->bus, res, 0); | 179 | dev_info(&info->bridge->dev, "host bridge window %pR\n", res); |
183 | info->res_num++; | 180 | |
184 | if (addr.translation_offset) | 181 | return AE_OK; |
185 | dev_info(&info->bridge->dev, "host bridge window %pR " | 182 | } |
186 | "(PCI address [%#llx-%#llx])\n", | 183 | |
187 | res, res->start - addr.translation_offset, | 184 | static bool resource_contains(struct resource *res, resource_size_t point) |
188 | res->end - addr.translation_offset); | 185 | { |
186 | if (res->start <= point && point <= res->end) | ||
187 | return true; | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | static void coalesce_windows(struct pci_root_info *info, unsigned long type) | ||
192 | { | ||
193 | int i, j; | ||
194 | struct resource *res1, *res2; | ||
195 | |||
196 | for (i = 0; i < info->res_num; i++) { | ||
197 | res1 = &info->res[i]; | ||
198 | if (!(res1->flags & type)) | ||
199 | continue; | ||
200 | |||
201 | for (j = i + 1; j < info->res_num; j++) { | ||
202 | res2 = &info->res[j]; | ||
203 | if (!(res2->flags & type)) | ||
204 | continue; | ||
205 | |||
206 | /* | ||
207 | * I don't like throwing away windows because then | ||
208 | * our resources no longer match the ACPI _CRS, but | ||
209 | * the kernel resource tree doesn't allow overlaps. | ||
210 | */ | ||
211 | if (resource_contains(res1, res2->start) || | ||
212 | resource_contains(res1, res2->end) || | ||
213 | resource_contains(res2, res1->start) || | ||
214 | resource_contains(res2, res1->end)) { | ||
215 | res1->start = min(res1->start, res2->start); | ||
216 | res1->end = max(res1->end, res2->end); | ||
217 | dev_info(&info->bridge->dev, | ||
218 | "host bridge window expanded to %pR; %pR ignored\n", | ||
219 | res1, res2); | ||
220 | res2->flags = 0; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | } | ||
225 | |||
226 | static void add_resources(struct pci_root_info *info) | ||
227 | { | ||
228 | int i; | ||
229 | struct resource *res, *root, *conflict; | ||
230 | |||
231 | if (!pci_use_crs) | ||
232 | return; | ||
233 | |||
234 | coalesce_windows(info, IORESOURCE_MEM); | ||
235 | coalesce_windows(info, IORESOURCE_IO); | ||
236 | |||
237 | for (i = 0; i < info->res_num; i++) { | ||
238 | res = &info->res[i]; | ||
239 | |||
240 | if (res->flags & IORESOURCE_MEM) | ||
241 | root = &iomem_resource; | ||
242 | else if (res->flags & IORESOURCE_IO) | ||
243 | root = &ioport_resource; | ||
189 | else | 244 | else |
190 | dev_info(&info->bridge->dev, | 245 | continue; |
191 | "host bridge window %pR\n", res); | 246 | |
247 | conflict = insert_resource_conflict(root, res); | ||
248 | if (conflict) | ||
249 | dev_err(&info->bridge->dev, | ||
250 | "address space collision: host bridge window %pR " | ||
251 | "conflicts with %s %pR\n", | ||
252 | res, conflict->name, conflict); | ||
253 | else | ||
254 | pci_bus_add_resource(info->bus, res, 0); | ||
192 | } | 255 | } |
193 | return AE_OK; | ||
194 | } | 256 | } |
195 | 257 | ||
196 | static void | 258 | static void |
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum, | |||
224 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, | 286 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, |
225 | &info); | 287 | &info); |
226 | 288 | ||
289 | add_resources(&info); | ||
227 | return; | 290 | return; |
228 | 291 | ||
229 | name_alloc_fail: | 292 | name_alloc_fail: |
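
The coalesce_windows()/add_resources() code added above merges host bridge windows that overlap before inserting them, because the kernel resource tree rejects overlapping entries. A minimal userspace sketch of just the overlap test and merge, with made-up window values (the struct below is a stand-in for struct resource, not kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    struct win { unsigned long start, end; };   /* stand-in for struct resource */

    static bool contains(const struct win *w, unsigned long point)
    {
        return w->start <= point && point <= w->end;
    }

    int main(void)
    {
        struct win a = { 0xa0000000UL, 0xbfffffffUL };  /* made-up MMIO window */
        struct win b = { 0xb0000000UL, 0xcfffffffUL };  /* overlaps the tail of 'a' */

        if (contains(&a, b.start) || contains(&a, b.end) ||
            contains(&b, a.start) || contains(&b, a.end)) {
            /* same merge as coalesce_windows(): expand one window, drop the other */
            a.start = a.start < b.start ? a.start : b.start;
            a.end   = a.end   > b.end   ? a.end   : b.end;
            printf("expanded to [%#lx-%#lx], second window ignored\n", a.start, a.end);
        }
        return 0;
    }
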
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index fc1e8fe07e5c..026e4931d162 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/cpu.h> | 4 | #include <linux/cpu.h> |
5 | #include <linux/range.h> | 5 | #include <linux/range.h> |
6 | 6 | ||
7 | #include <asm/amd_nb.h> | ||
7 | #include <asm/pci_x86.h> | 8 | #include <asm/pci_x86.h> |
8 | 9 | ||
9 | #include <asm/pci-direct.h> | 10 | #include <asm/pci-direct.h> |
@@ -349,7 +350,7 @@ static int __init early_fill_mp_bus_info(void) | |||
349 | 350 | ||
350 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) | 351 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) |
351 | 352 | ||
352 | static void enable_pci_io_ecs(void *unused) | 353 | static void __cpuinit enable_pci_io_ecs(void *unused) |
353 | { | 354 | { |
354 | u64 reg; | 355 | u64 reg; |
355 | rdmsrl(MSR_AMD64_NB_CFG, reg); | 356 | rdmsrl(MSR_AMD64_NB_CFG, reg); |
@@ -378,6 +379,34 @@ static struct notifier_block __cpuinitdata amd_cpu_notifier = { | |||
378 | .notifier_call = amd_cpu_notify, | 379 | .notifier_call = amd_cpu_notify, |
379 | }; | 380 | }; |
380 | 381 | ||
382 | static void __init pci_enable_pci_io_ecs(void) | ||
383 | { | ||
384 | #ifdef CONFIG_AMD_NB | ||
385 | unsigned int i, n; | ||
386 | |||
387 | for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) { | ||
388 | u8 bus = amd_nb_bus_dev_ranges[i].bus; | ||
389 | u8 slot = amd_nb_bus_dev_ranges[i].dev_base; | ||
390 | u8 limit = amd_nb_bus_dev_ranges[i].dev_limit; | ||
391 | |||
392 | for (; slot < limit; ++slot) { | ||
393 | u32 val = read_pci_config(bus, slot, 3, 0); | ||
394 | |||
395 | if (!early_is_amd_nb(val)) | ||
396 | continue; | ||
397 | |||
398 | val = read_pci_config(bus, slot, 3, 0x8c); | ||
399 | if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) { | ||
400 | val |= ENABLE_CF8_EXT_CFG >> 32; | ||
401 | write_pci_config(bus, slot, 3, 0x8c, val); | ||
402 | } | ||
403 | ++n; | ||
404 | } | ||
405 | } | ||
406 | pr_info("Extended Config Space enabled on %u nodes\n", n); | ||
407 | #endif | ||
408 | } | ||
409 | |||
381 | static int __init pci_io_ecs_init(void) | 410 | static int __init pci_io_ecs_init(void) |
382 | { | 411 | { |
383 | int cpu; | 412 | int cpu; |
@@ -386,6 +415,10 @@ static int __init pci_io_ecs_init(void) | |||
386 | if (boot_cpu_data.x86 < 0x10) | 415 | if (boot_cpu_data.x86 < 0x10) |
387 | return 0; | 416 | return 0; |
388 | 417 | ||
418 | /* Try the PCI method first. */ | ||
419 | if (early_pci_allowed()) | ||
420 | pci_enable_pci_io_ecs(); | ||
421 | |||
389 | register_cpu_notifier(&amd_cpu_notifier); | 422 | register_cpu_notifier(&amd_cpu_notifier); |
390 | for_each_online_cpu(cpu) | 423 | for_each_online_cpu(cpu) |
391 | amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, | 424 | amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, |
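
The new pci_enable_pci_io_ecs() sets the same enable bit as the MSR path, but through config offset 0x8c of northbridge function 3, which (as the shift by 32 in the hunk implies) carries the upper half of NB_CFG. A quick standalone check of that arithmetic:

    #include <stdio.h>

    #define ENABLE_CF8_EXT_CFG (1ULL << 46)     /* bit 46 of MSR_AMD64_NB_CFG */

    int main(void)
    {
        /* the dword at config offset 0x8c holds the high half of NB_CFG,
         * so MSR bit 46 shows up there as bit 14, i.e. 0x4000 */
        printf("%#x\n", (unsigned int)(ENABLE_CF8_EXT_CFG >> 32));
        return 0;
    }
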
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 0846a5bbbfbd..ab8269b0da29 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -9,6 +9,7 @@ | |||
9 | * option) any later version. | 9 | * option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/acpi.h> | ||
12 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
13 | #include <linux/dmi.h> | 14 | #include <linux/dmi.h> |
14 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
@@ -25,12 +26,14 @@ static void __devinit cnb20le_res(struct pci_dev *dev) | |||
25 | u8 fbus, lbus; | 26 | u8 fbus, lbus; |
26 | int i; | 27 | int i; |
27 | 28 | ||
29 | #ifdef CONFIG_ACPI | ||
28 | /* | 30 | /* |
29 | * The x86_pci_root_bus_res_quirks() function already refuses to use | 31 | * We should get host bridge information from ACPI unless the BIOS |
30 | * this information if ACPI _CRS was used. Therefore, we don't bother | 32 | * doesn't support it. |
31 | * checking if ACPI is enabled, and just generate the information | ||
32 | * for both the ACPI _CRS and no ACPI cases. | ||
33 | */ | 33 | */ |
34 | if (acpi_os_get_root_pointer()) | ||
35 | return; | ||
36 | #endif | ||
34 | 37 | ||
35 | info = &pci_root_info[pci_root_num]; | 38 | info = &pci_root_info[pci_root_num]; |
36 | pci_root_num++; | 39 | pci_root_num++; |
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
new file mode 100644
index 000000000000..67858be4b52b
--- /dev/null
+++ b/arch/x86/pci/ce4100.c
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * GPL LICENSE SUMMARY | ||
3 | * | ||
4 | * Copyright(c) 2010 Intel Corporation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of version 2 of the GNU General Public License as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * The full GNU General Public License is included in this distribution | ||
19 | * in the file called LICENSE.GPL. | ||
20 | * | ||
21 | * Contact Information: | ||
22 | * Intel Corporation | ||
23 | * 2200 Mission College Blvd. | ||
24 | * Santa Clara, CA 97052 | ||
25 | * | ||
26 | * This provides access methods for PCI registers that misbehave on | ||
27 | * the CE4100. Each register can be assigned a private init, read and | ||
28 | * write routine. The exception to this is the bridge device. The | ||
29 | * bridge device is the only device on bus zero (0) that requires any | ||
30 | * fixup, so it is treated as a special case for now. | ||
31 | */ | ||
32 | |||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/init.h> | ||
36 | |||
37 | #include <asm/ce4100.h> | ||
38 | #include <asm/pci_x86.h> | ||
39 | |||
40 | struct sim_reg { | ||
41 | u32 value; | ||
42 | u32 mask; | ||
43 | }; | ||
44 | |||
45 | struct sim_dev_reg { | ||
46 | int dev_func; | ||
47 | int reg; | ||
48 | void (*init)(struct sim_dev_reg *reg); | ||
49 | void (*read)(struct sim_dev_reg *reg, u32 *value); | ||
50 | void (*write)(struct sim_dev_reg *reg, u32 value); | ||
51 | struct sim_reg sim_reg; | ||
52 | }; | ||
53 | |||
54 | struct sim_reg_op { | ||
55 | void (*init)(struct sim_dev_reg *reg); | ||
56 | void (*read)(struct sim_dev_reg *reg, u32 value); | ||
57 | void (*write)(struct sim_dev_reg *reg, u32 value); | ||
58 | }; | ||
59 | |||
60 | #define MB (1024 * 1024) | ||
61 | #define KB (1024) | ||
62 | #define SIZE_TO_MASK(size) (~(size - 1)) | ||
63 | |||
64 | #define DEFINE_REG(device, func, offset, size, init_op, read_op, write_op)\ | ||
65 | { PCI_DEVFN(device, func), offset, init_op, read_op, write_op,\ | ||
66 | {0, SIZE_TO_MASK(size)} }, | ||
67 | |||
68 | static void reg_init(struct sim_dev_reg *reg) | ||
69 | { | ||
70 | pci_direct_conf1.read(0, 1, reg->dev_func, reg->reg, 4, | ||
71 | ®->sim_reg.value); | ||
72 | } | ||
73 | |||
74 | static void reg_read(struct sim_dev_reg *reg, u32 *value) | ||
75 | { | ||
76 | unsigned long flags; | ||
77 | |||
78 | raw_spin_lock_irqsave(&pci_config_lock, flags); | ||
79 | *value = reg->sim_reg.value; | ||
80 | raw_spin_unlock_irqrestore(&pci_config_lock, flags); | ||
81 | } | ||
82 | |||
83 | static void reg_write(struct sim_dev_reg *reg, u32 value) | ||
84 | { | ||
85 | unsigned long flags; | ||
86 | |||
87 | raw_spin_lock_irqsave(&pci_config_lock, flags); | ||
88 | reg->sim_reg.value = (value & reg->sim_reg.mask) | | ||
89 | (reg->sim_reg.value & ~reg->sim_reg.mask); | ||
90 | raw_spin_unlock_irqrestore(&pci_config_lock, flags); | ||
91 | } | ||
92 | |||
93 | static void sata_reg_init(struct sim_dev_reg *reg) | ||
94 | { | ||
95 | pci_direct_conf1.read(0, 1, PCI_DEVFN(14, 0), 0x10, 4, | ||
96 | ®->sim_reg.value); | ||
97 | reg->sim_reg.value += 0x400; | ||
98 | } | ||
99 | |||
100 | static void ehci_reg_read(struct sim_dev_reg *reg, u32 *value) | ||
101 | { | ||
102 | reg_read(reg, value); | ||
103 | if (*value != reg->sim_reg.mask) | ||
104 | *value |= 0x100; | ||
105 | } | ||
106 | |||
107 | void sata_revid_init(struct sim_dev_reg *reg) | ||
108 | { | ||
109 | reg->sim_reg.value = 0x01060100; | ||
110 | reg->sim_reg.mask = 0; | ||
111 | } | ||
112 | |||
113 | static void sata_revid_read(struct sim_dev_reg *reg, u32 *value) | ||
114 | { | ||
115 | reg_read(reg, value); | ||
116 | } | ||
117 | |||
118 | static struct sim_dev_reg bus1_fixups[] = { | ||
119 | DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write) | ||
120 | DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write) | ||
121 | DEFINE_REG(2, 1, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
122 | DEFINE_REG(3, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
123 | DEFINE_REG(4, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) | ||
124 | DEFINE_REG(4, 1, 0x10, (128*KB), reg_init, reg_read, reg_write) | ||
125 | DEFINE_REG(6, 0, 0x10, (512*KB), reg_init, reg_read, reg_write) | ||
126 | DEFINE_REG(6, 1, 0x10, (512*KB), reg_init, reg_read, reg_write) | ||
127 | DEFINE_REG(6, 2, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
128 | DEFINE_REG(8, 0, 0x10, (1*MB), reg_init, reg_read, reg_write) | ||
129 | DEFINE_REG(8, 1, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
130 | DEFINE_REG(8, 2, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
131 | DEFINE_REG(9, 0, 0x10 , (1*MB), reg_init, reg_read, reg_write) | ||
132 | DEFINE_REG(9, 0, 0x14, (64*KB), reg_init, reg_read, reg_write) | ||
133 | DEFINE_REG(10, 0, 0x10, (256), reg_init, reg_read, reg_write) | ||
134 | DEFINE_REG(10, 0, 0x14, (256*MB), reg_init, reg_read, reg_write) | ||
135 | DEFINE_REG(11, 0, 0x10, (256), reg_init, reg_read, reg_write) | ||
136 | DEFINE_REG(11, 0, 0x14, (256), reg_init, reg_read, reg_write) | ||
137 | DEFINE_REG(11, 1, 0x10, (256), reg_init, reg_read, reg_write) | ||
138 | DEFINE_REG(11, 2, 0x10, (256), reg_init, reg_read, reg_write) | ||
139 | DEFINE_REG(11, 2, 0x14, (256), reg_init, reg_read, reg_write) | ||
140 | DEFINE_REG(11, 2, 0x18, (256), reg_init, reg_read, reg_write) | ||
141 | DEFINE_REG(11, 3, 0x10, (256), reg_init, reg_read, reg_write) | ||
142 | DEFINE_REG(11, 3, 0x14, (256), reg_init, reg_read, reg_write) | ||
143 | DEFINE_REG(11, 4, 0x10, (256), reg_init, reg_read, reg_write) | ||
144 | DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
145 | DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write) | ||
146 | DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
147 | DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) | ||
148 | DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write) | ||
149 | DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write) | ||
150 | DEFINE_REG(13, 0, 0x10, (32*KB), reg_init, ehci_reg_read, reg_write) | ||
151 | DEFINE_REG(13, 1, 0x10, (32*KB), reg_init, ehci_reg_read, reg_write) | ||
152 | DEFINE_REG(14, 0, 0x8, 0, sata_revid_init, sata_revid_read, 0) | ||
153 | DEFINE_REG(14, 0, 0x10, 0, reg_init, reg_read, reg_write) | ||
154 | DEFINE_REG(14, 0, 0x14, 0, reg_init, reg_read, reg_write) | ||
155 | DEFINE_REG(14, 0, 0x18, 0, reg_init, reg_read, reg_write) | ||
156 | DEFINE_REG(14, 0, 0x1C, 0, reg_init, reg_read, reg_write) | ||
157 | DEFINE_REG(14, 0, 0x20, 0, reg_init, reg_read, reg_write) | ||
158 | DEFINE_REG(14, 0, 0x24, (0x200), sata_reg_init, reg_read, reg_write) | ||
159 | DEFINE_REG(15, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
160 | DEFINE_REG(15, 0, 0x14, (64*KB), reg_init, reg_read, reg_write) | ||
161 | DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) | ||
162 | DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write) | ||
163 | DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write) | ||
164 | DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) | ||
165 | DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write) | ||
166 | }; | ||
167 | |||
168 | static void __init init_sim_regs(void) | ||
169 | { | ||
170 | int i; | ||
171 | |||
172 | for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) { | ||
173 | if (bus1_fixups[i].init) | ||
174 | bus1_fixups[i].init(&bus1_fixups[i]); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | static inline void extract_bytes(u32 *value, int reg, int len) | ||
179 | { | ||
180 | uint32_t mask; | ||
181 | |||
182 | *value >>= ((reg & 3) * 8); | ||
183 | mask = 0xFFFFFFFF >> ((4 - len) * 8); | ||
184 | *value &= mask; | ||
185 | } | ||
186 | |||
187 | int bridge_read(unsigned int devfn, int reg, int len, u32 *value) | ||
188 | { | ||
189 | u32 av_bridge_base, av_bridge_limit; | ||
190 | int retval = 0; | ||
191 | |||
192 | switch (reg) { | ||
193 | /* Make BARs appear to not request any memory. */ | ||
194 | case PCI_BASE_ADDRESS_0: | ||
195 | case PCI_BASE_ADDRESS_0 + 1: | ||
196 | case PCI_BASE_ADDRESS_0 + 2: | ||
197 | case PCI_BASE_ADDRESS_0 + 3: | ||
198 | *value = 0; | ||
199 | break; | ||
200 | |||
201 | /* The subordinate bus number register is hardwired | ||
202 | * to zero and read-only, so simulate it here. | ||
203 | */ | ||
204 | case PCI_PRIMARY_BUS: | ||
205 | if (len == 4) | ||
206 | *value = 0x00010100; | ||
207 | break; | ||
208 | |||
209 | case PCI_SUBORDINATE_BUS: | ||
210 | *value = 1; | ||
211 | break; | ||
212 | |||
213 | case PCI_MEMORY_BASE: | ||
214 | case PCI_MEMORY_LIMIT: | ||
215 | /* Get the A/V bridge base address. */ | ||
216 | pci_direct_conf1.read(0, 0, devfn, | ||
217 | PCI_BASE_ADDRESS_0, 4, &av_bridge_base); | ||
218 | |||
219 | av_bridge_limit = av_bridge_base + (512*MB - 1); | ||
220 | av_bridge_limit >>= 16; | ||
221 | av_bridge_limit &= 0xFFF0; | ||
222 | |||
223 | av_bridge_base >>= 16; | ||
224 | av_bridge_base &= 0xFFF0; | ||
225 | |||
226 | if (reg == PCI_MEMORY_LIMIT) | ||
227 | *value = av_bridge_limit; | ||
228 | else if (len == 2) | ||
229 | *value = av_bridge_base; | ||
230 | else | ||
231 | *value = (av_bridge_limit << 16) | av_bridge_base; | ||
232 | break; | ||
233 | /* Make prefetchable memory limit smaller than prefetchable | ||
234 | * memory base, so the bridge does not claim prefetchable memory space. | ||
235 | */ | ||
236 | case PCI_PREF_MEMORY_BASE: | ||
237 | *value = 0xFFF0; | ||
238 | break; | ||
239 | case PCI_PREF_MEMORY_LIMIT: | ||
240 | *value = 0x0; | ||
241 | break; | ||
242 | /* Make IO limit smaller than IO base, so the bridge does not claim IO space. */ | ||
243 | case PCI_IO_BASE: | ||
244 | *value = 0xF0; | ||
245 | break; | ||
246 | case PCI_IO_LIMIT: | ||
247 | *value = 0; | ||
248 | break; | ||
249 | default: | ||
250 | retval = 1; | ||
251 | } | ||
252 | return retval; | ||
253 | } | ||
254 | |||
255 | static int ce4100_conf_read(unsigned int seg, unsigned int bus, | ||
256 | unsigned int devfn, int reg, int len, u32 *value) | ||
257 | { | ||
258 | int i; | ||
259 | |||
260 | if (bus == 1) { | ||
261 | for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) { | ||
262 | if (bus1_fixups[i].dev_func == devfn && | ||
263 | bus1_fixups[i].reg == (reg & ~3) && | ||
264 | bus1_fixups[i].read) { | ||
265 | bus1_fixups[i].read(&(bus1_fixups[i]), | ||
266 | value); | ||
267 | extract_bytes(value, reg, len); | ||
268 | return 0; | ||
269 | } | ||
270 | } | ||
271 | } | ||
272 | |||
273 | if (bus == 0 && (PCI_DEVFN(1, 0) == devfn) && | ||
274 | !bridge_read(devfn, reg, len, value)) | ||
275 | return 0; | ||
276 | |||
277 | return pci_direct_conf1.read(seg, bus, devfn, reg, len, value); | ||
278 | } | ||
279 | |||
280 | static int ce4100_conf_write(unsigned int seg, unsigned int bus, | ||
281 | unsigned int devfn, int reg, int len, u32 value) | ||
282 | { | ||
283 | int i; | ||
284 | |||
285 | if (bus == 1) { | ||
286 | for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) { | ||
287 | if (bus1_fixups[i].dev_func == devfn && | ||
288 | bus1_fixups[i].reg == (reg & ~3) && | ||
289 | bus1_fixups[i].write) { | ||
290 | bus1_fixups[i].write(&(bus1_fixups[i]), | ||
291 | value); | ||
292 | return 0; | ||
293 | } | ||
294 | } | ||
295 | } | ||
296 | |||
297 | /* Discard writes to A/V bridge BAR. */ | ||
298 | if (bus == 0 && PCI_DEVFN(1, 0) == devfn && | ||
299 | ((reg & ~3) == PCI_BASE_ADDRESS_0)) | ||
300 | return 0; | ||
301 | |||
302 | return pci_direct_conf1.write(seg, bus, devfn, reg, len, value); | ||
303 | } | ||
304 | |||
305 | struct pci_raw_ops ce4100_pci_conf = { | ||
306 | .read = ce4100_conf_read, | ||
307 | .write = ce4100_conf_write, | ||
308 | }; | ||
309 | |||
310 | int __init ce4100_pci_init(void) | ||
311 | { | ||
312 | init_sim_regs(); | ||
313 | raw_pci_ops = &ce4100_pci_conf; | ||
314 | /* Indicate to the caller that it should invoke pci_legacy_init() */ | ||
315 | return 1; | ||
316 | } | ||
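
ce4100_conf_read() always hands back a full aligned dword from the simulated register, and extract_bytes() then trims it to the bytes the caller asked for. A standalone copy of that helper with an invented register value:

    #include <stdio.h>

    /* same logic as extract_bytes() in ce4100.c */
    static void extract_bytes(unsigned int *value, int reg, int len)
    {
        unsigned int mask;

        *value >>= ((reg & 3) * 8);
        mask = 0xFFFFFFFF >> ((4 - len) * 8);
        *value &= mask;
    }

    int main(void)
    {
        unsigned int v = 0x11223344;    /* invented dword read at offset reg & ~3 */

        extract_bytes(&v, 2, 2);        /* a 2-byte config read at offset 2 */
        printf("%#x\n", v);             /* prints 0x1122, the upper halfword */
        return 0;
    }
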
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index a0772af64efb..5fe75026ecc2 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -22,6 +22,7 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | | |||
22 | 22 | ||
23 | unsigned int pci_early_dump_regs; | 23 | unsigned int pci_early_dump_regs; |
24 | static int pci_bf_sort; | 24 | static int pci_bf_sort; |
25 | static int smbios_type_b1_flag; | ||
25 | int pci_routeirq; | 26 | int pci_routeirq; |
26 | int noioapicquirk; | 27 | int noioapicquirk; |
27 | #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS | 28 | #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS |
@@ -185,6 +186,39 @@ static int __devinit set_bf_sort(const struct dmi_system_id *d) | |||
185 | return 0; | 186 | return 0; |
186 | } | 187 | } |
187 | 188 | ||
189 | static void __devinit read_dmi_type_b1(const struct dmi_header *dm, | ||
190 | void *private_data) | ||
191 | { | ||
192 | u8 *d = (u8 *)dm + 4; | ||
193 | |||
194 | if (dm->type != 0xB1) | ||
195 | return; | ||
196 | switch (((*(u32 *)d) >> 9) & 0x03) { | ||
197 | case 0x00: | ||
198 | printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n"); | ||
199 | break; | ||
200 | case 0x01: /* set pci=bfsort */ | ||
201 | smbios_type_b1_flag = 1; | ||
202 | break; | ||
203 | case 0x02: /* do not set pci=bfsort */ | ||
204 | smbios_type_b1_flag = 2; | ||
205 | break; | ||
206 | default: | ||
207 | break; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | static int __devinit find_sort_method(const struct dmi_system_id *d) | ||
212 | { | ||
213 | dmi_walk(read_dmi_type_b1, NULL); | ||
214 | |||
215 | if (smbios_type_b1_flag == 1) { | ||
216 | set_bf_sort(d); | ||
217 | return 0; | ||
218 | } | ||
219 | return -1; | ||
220 | } | ||
221 | |||
188 | /* | 222 | /* |
189 | * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) | 223 | * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) |
190 | */ | 224 | */ |
@@ -213,6 +247,13 @@ static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = { | |||
213 | }, | 247 | }, |
214 | #endif /* __i386__ */ | 248 | #endif /* __i386__ */ |
215 | { | 249 | { |
250 | .callback = find_sort_method, | ||
251 | .ident = "Dell System", | ||
252 | .matches = { | ||
253 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), | ||
254 | }, | ||
255 | }, | ||
256 | { | ||
216 | .callback = set_bf_sort, | 257 | .callback = set_bf_sort, |
217 | .ident = "Dell PowerEdge 1950", | 258 | .ident = "Dell PowerEdge 1950", |
218 | .matches = { | 259 | .matches = { |
@@ -421,16 +462,10 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum) | |||
421 | 462 | ||
422 | return bus; | 463 | return bus; |
423 | } | 464 | } |
424 | 465 | void __init pcibios_set_cache_line_size(void) | |
425 | int __init pcibios_init(void) | ||
426 | { | 466 | { |
427 | struct cpuinfo_x86 *c = &boot_cpu_data; | 467 | struct cpuinfo_x86 *c = &boot_cpu_data; |
428 | 468 | ||
429 | if (!raw_pci_ops) { | ||
430 | printk(KERN_WARNING "PCI: System does not support PCI\n"); | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | /* | 469 | /* |
435 | * Set PCI cacheline size to that of the CPU if the CPU has reported it. | 470 | * Set PCI cacheline size to that of the CPU if the CPU has reported it. |
436 | * (For older CPUs that don't support cpuid, we set it to 32 bytes | 471 | * (For older CPUs that don't support cpuid, we set it to 32 bytes
@@ -445,7 +480,16 @@ int __init pcibios_init(void) | |||
445 | pci_dfl_cache_line_size = 32 >> 2; | 480 | pci_dfl_cache_line_size = 32 >> 2; |
446 | printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n"); | 481 | printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n"); |
447 | } | 482 | } |
483 | } | ||
484 | |||
485 | int __init pcibios_init(void) | ||
486 | { | ||
487 | if (!raw_pci_ops) { | ||
488 | printk(KERN_WARNING "PCI: System does not support PCI\n"); | ||
489 | return 0; | ||
490 | } | ||
448 | 491 | ||
492 | pcibios_set_cache_line_size(); | ||
449 | pcibios_resource_survey(); | 493 | pcibios_resource_survey(); |
450 | 494 | ||
451 | if (pci_bf_sort >= pci_force_bf) | 495 | if (pci_bf_sort >= pci_force_bf) |
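
find_sort_method() decides whether to force pci=bfsort from bits 9-10 of the dword that follows the 4-byte SMBIOS header of the OEM-specific 0xB1 record. A tiny illustration of that decode with an invented flag value:

    #include <stdio.h>

    int main(void)
    {
        /* invented flag dword; only bits 9-10 matter to read_dmi_type_b1() */
        unsigned int flags = 1u << 9;

        switch ((flags >> 9) & 0x03) {
        case 0x01: printf("BIOS requests pci=bfsort\n");      break;
        case 0x02: printf("BIOS requests no pci=bfsort\n");   break;
        default:   printf("no preference recorded\n");        break;
        }
        return 0;
    }
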
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index bd33620b0071..e6fd8473fb7b 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -280,12 +280,9 @@ void __init pci_direct_init(int type) | |||
280 | 280 | ||
281 | int __init pci_direct_probe(void) | 281 | int __init pci_direct_probe(void) |
282 | { | 282 | { |
283 | struct resource *region, *region2; | ||
284 | |||
285 | if ((pci_probe & PCI_PROBE_CONF1) == 0) | 283 | if ((pci_probe & PCI_PROBE_CONF1) == 0) |
286 | goto type2; | 284 | goto type2; |
287 | region = request_region(0xCF8, 8, "PCI conf1"); | 285 | if (!request_region(0xCF8, 8, "PCI conf1")) |
288 | if (!region) | ||
289 | goto type2; | 286 | goto type2; |
290 | 287 | ||
291 | if (pci_check_type1()) { | 288 | if (pci_check_type1()) { |
@@ -293,16 +290,14 @@ int __init pci_direct_probe(void) | |||
293 | port_cf9_safe = true; | 290 | port_cf9_safe = true; |
294 | return 1; | 291 | return 1; |
295 | } | 292 | } |
296 | release_resource(region); | 293 | release_region(0xCF8, 8); |
297 | 294 | ||
298 | type2: | 295 | type2: |
299 | if ((pci_probe & PCI_PROBE_CONF2) == 0) | 296 | if ((pci_probe & PCI_PROBE_CONF2) == 0) |
300 | return 0; | 297 | return 0; |
301 | region = request_region(0xCF8, 4, "PCI conf2"); | 298 | if (!request_region(0xCF8, 4, "PCI conf2")) |
302 | if (!region) | ||
303 | return 0; | 299 | return 0; |
304 | region2 = request_region(0xC000, 0x1000, "PCI conf2"); | 300 | if (!request_region(0xC000, 0x1000, "PCI conf2")) |
305 | if (!region2) | ||
306 | goto fail2; | 301 | goto fail2; |
307 | 302 | ||
308 | if (pci_check_type2()) { | 303 | if (pci_check_type2()) { |
@@ -311,8 +306,8 @@ int __init pci_direct_probe(void) | |||
311 | return 2; | 306 | return 2; |
312 | } | 307 | } |
313 | 308 | ||
314 | release_resource(region2); | 309 | release_region(0xC000, 0x1000); |
315 | fail2: | 310 | fail2: |
316 | release_resource(region); | 311 | release_region(0xCF8, 4); |
317 | return 0; | 312 | return 0; |
318 | } | 313 | } |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 55253095be84..494f2e7ea2b4 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -72,9 +72,6 @@ pcibios_align_resource(void *data, const struct resource *res, | |||
72 | return start; | 72 | return start; |
73 | if (start & 0x300) | 73 | if (start & 0x300) |
74 | start = (start + 0x3ff) & ~0x3ff; | 74 | start = (start + 0x3ff) & ~0x3ff; |
75 | } else if (res->flags & IORESOURCE_MEM) { | ||
76 | if (start < BIOS_END) | ||
77 | start = BIOS_END; | ||
78 | } | 75 | } |
79 | return start; | 76 | return start; |
80 | } | 77 | } |
@@ -244,7 +241,7 @@ void __init pcibios_resource_survey(void) | |||
244 | e820_reserve_resources_late(); | 241 | e820_reserve_resources_late(); |
245 | /* | 242 | /* |
246 | * Insert the IO APIC resources after PCI initialization has | 243 | * Insert the IO APIC resources after PCI initialization has |
247 | * occured to handle IO APICS that are mapped in on a BAR in | 244 | * occurred to handle IO APICS that are mapped in on a BAR in |
248 | * PCI space, but before trying to assign unassigned pci res. | 245 | * PCI space, but before trying to assign unassigned pci res. |
249 | */ | 246 | */ |
250 | ioapic_insert_resources(); | 247 | ioapic_insert_resources(); |
@@ -307,10 +304,12 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
307 | /* | 304 | /* |
308 | * ioremap() and ioremap_nocache() defaults to UC MINUS for now. | 305 | * ioremap() and ioremap_nocache() defaults to UC MINUS for now. |
309 | * To avoid attribute conflicts, request UC MINUS here | 306 | * To avoid attribute conflicts, request UC MINUS here |
310 | * aswell. | 307 | * as well. |
311 | */ | 308 | */ |
312 | prot |= _PAGE_CACHE_UC_MINUS; | 309 | prot |= _PAGE_CACHE_UC_MINUS; |
313 | 310 | ||
311 | prot |= _PAGE_IOMAP; /* creating a mapping for IO */ | ||
312 | |||
314 | vma->vm_page_prot = __pgprot(prot); | 313 | vma->vm_page_prot = __pgprot(prot); |
315 | 314 | ||
316 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 315 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index f547ee05f715..372e9b8989b3 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -584,32 +584,33 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route | |||
584 | case PCI_DEVICE_ID_INTEL_ICH9_3: | 584 | case PCI_DEVICE_ID_INTEL_ICH9_3: |
585 | case PCI_DEVICE_ID_INTEL_ICH9_4: | 585 | case PCI_DEVICE_ID_INTEL_ICH9_4: |
586 | case PCI_DEVICE_ID_INTEL_ICH9_5: | 586 | case PCI_DEVICE_ID_INTEL_ICH9_5: |
587 | case PCI_DEVICE_ID_INTEL_TOLAPAI_0: | 587 | case PCI_DEVICE_ID_INTEL_EP80579_0: |
588 | case PCI_DEVICE_ID_INTEL_ICH10_0: | 588 | case PCI_DEVICE_ID_INTEL_ICH10_0: |
589 | case PCI_DEVICE_ID_INTEL_ICH10_1: | 589 | case PCI_DEVICE_ID_INTEL_ICH10_1: |
590 | case PCI_DEVICE_ID_INTEL_ICH10_2: | 590 | case PCI_DEVICE_ID_INTEL_ICH10_2: |
591 | case PCI_DEVICE_ID_INTEL_ICH10_3: | 591 | case PCI_DEVICE_ID_INTEL_ICH10_3: |
592 | case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0: | ||
593 | case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1: | ||
592 | r->name = "PIIX/ICH"; | 594 | r->name = "PIIX/ICH"; |
593 | r->get = pirq_piix_get; | 595 | r->get = pirq_piix_get; |
594 | r->set = pirq_piix_set; | 596 | r->set = pirq_piix_set; |
595 | return 1; | 597 | return 1; |
596 | } | 598 | } |
597 | 599 | ||
598 | if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) && | 600 | if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN && |
599 | (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) { | 601 | device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX) |
602 | || (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN && | ||
603 | device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX) | ||
604 | || (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN && | ||
605 | device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX) | ||
606 | || (device >= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN && | ||
607 | device <= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX)) { | ||
600 | r->name = "PIIX/ICH"; | 608 | r->name = "PIIX/ICH"; |
601 | r->get = pirq_piix_get; | 609 | r->get = pirq_piix_get; |
602 | r->set = pirq_piix_set; | 610 | r->set = pirq_piix_set; |
603 | return 1; | 611 | return 1; |
604 | } | 612 | } |
605 | 613 | ||
606 | if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) && | ||
607 | (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) { | ||
608 | r->name = "PIIX/ICH"; | ||
609 | r->get = pirq_piix_get; | ||
610 | r->set = pirq_piix_set; | ||
611 | return 1; | ||
612 | } | ||
613 | return 0; | 614 | return 0; |
614 | } | 615 | } |
615 | 616 | ||
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index a918553ebc75..750c346ef50a 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, | |||
65 | int end, u64 addr) | 65 | int end, u64 addr) |
66 | { | 66 | { |
67 | struct pci_mmcfg_region *new; | 67 | struct pci_mmcfg_region *new; |
68 | int num_buses; | ||
69 | struct resource *res; | 68 | struct resource *res; |
70 | 69 | ||
71 | if (addr == 0) | 70 | if (addr == 0) |
@@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, | |||
82 | 81 | ||
83 | list_add_sorted(new); | 82 | list_add_sorted(new); |
84 | 83 | ||
85 | num_buses = end - start + 1; | ||
86 | res = &new->res; | 84 | res = &new->res; |
87 | res->start = addr + PCI_MMCFG_BUS_OFFSET(start); | 85 | res->start = addr + PCI_MMCFG_BUS_OFFSET(start); |
88 | res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1; | 86 | res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1; |
89 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 87 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
90 | snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, | 88 | snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, |
91 | "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); | 89 | "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); |
@@ -608,6 +606,16 @@ static void __init __pci_mmcfg_init(int early) | |||
608 | if (list_empty(&pci_mmcfg_list)) | 606 | if (list_empty(&pci_mmcfg_list)) |
609 | return; | 607 | return; |
610 | 608 | ||
609 | if (pcibios_last_bus < 0) { | ||
610 | const struct pci_mmcfg_region *cfg; | ||
611 | |||
612 | list_for_each_entry(cfg, &pci_mmcfg_list, list) { | ||
613 | if (cfg->segment) | ||
614 | break; | ||
615 | pcibios_last_bus = cfg->end_bus; | ||
616 | } | ||
617 | } | ||
618 | |||
611 | if (pci_mmcfg_arch_init()) | 619 | if (pci_mmcfg_arch_init()) |
612 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; | 620 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; |
613 | else { | 621 | else { |
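
The first mmconfig-shared.c hunk fixes the resource end for MMCONFIG segments whose bus range does not start at bus 0: the old code sized the region from the bus count while the start was already offset by the first bus number. Assuming PCI_MMCFG_BUS_OFFSET(bus) is bus << 20 (1 MiB of config space per bus), a worked comparison with made-up values:

    #include <stdio.h>

    #define PCI_MMCFG_BUS_OFFSET(bus) ((unsigned long long)(bus) << 20)

    int main(void)
    {
        unsigned long long addr = 0xe0000000ULL;    /* hypothetical ECAM base */
        int start = 0x20, end = 0x3f;               /* hypothetical bus range */
        int num_buses = end - start + 1;

        unsigned long long res_start = addr + PCI_MMCFG_BUS_OFFSET(start);
        unsigned long long old_end   = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
        unsigned long long new_end   = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;

        /* old_end (0xe1ffffff) lands before res_start (0xe2000000);
         * new_end (0xe3ffffff) covers buses 0x20-0x3f as intended. */
        printf("start %#llx old_end %#llx new_end %#llx\n", res_start, old_end, new_end);
        return 0;
    }
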
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
index b34815408f58..13700ec8e2e4 100644
--- a/arch/x86/pci/olpc.c
+++ b/arch/x86/pci/olpc.c
@@ -304,7 +304,7 @@ static struct pci_raw_ops pci_olpc_conf = { | |||
304 | 304 | ||
305 | int __init pci_olpc_init(void) | 305 | int __init pci_olpc_init(void) |
306 | { | 306 | { |
307 | printk(KERN_INFO "PCI: Using configuration type OLPC\n"); | 307 | printk(KERN_INFO "PCI: Using configuration type OLPC XO-1\n"); |
308 | raw_pci_ops = &pci_olpc_conf; | 308 | raw_pci_ops = &pci_olpc_conf; |
309 | is_lx = is_geode_lx(); | 309 | is_lx = is_geode_lx(); |
310 | return 0; | 310 | return 0; |
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 2492d165096a..a5f7d0d63de0 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | #include <asm/pci_x86.h> | 10 | #include <asm/pci_x86.h> |
11 | #include <asm/pci-functions.h> | 11 | #include <asm/pci-functions.h> |
12 | #include <asm/cacheflush.h> | ||
12 | 13 | ||
13 | /* BIOS32 signature: "_32_" */ | 14 | /* BIOS32 signature: "_32_" */ |
14 | #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) | 15 | #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) |
@@ -25,6 +26,27 @@ | |||
25 | #define PCIBIOS_HW_TYPE1_SPEC 0x10 | 26 | #define PCIBIOS_HW_TYPE1_SPEC 0x10 |
26 | #define PCIBIOS_HW_TYPE2_SPEC 0x20 | 27 | #define PCIBIOS_HW_TYPE2_SPEC 0x20 |
27 | 28 | ||
29 | int pcibios_enabled; | ||
30 | |||
31 | /* According to the BIOS specification at: | ||
32 | * http://members.datafast.net.au/dft0802/specs/bios21.pdf, we could | ||
33 | * restrict the executable zone to a few pages and make it read-only, | ||
34 | * but that may be broken on some BIOSes and is complex to handle with | ||
35 | * static_protections. We could make the 0xe0000-0x100000 range rox, | ||
36 | * but that can break some ISA mappings. | ||
37 | * | ||
38 | * So we leave an rw and x hole when pcibios is used. This shouldn't | ||
39 | * happen on modern systems with mmconfig, and if you don't want the | ||
40 | * hole you can disable pcibios... | ||
41 | */ | ||
42 | static inline void set_bios_x(void) | ||
43 | { | ||
44 | pcibios_enabled = 1; | ||
45 | set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT); | ||
46 | if (__supported_pte_mask & _PAGE_NX) | ||
47 | printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n"); | ||
48 | } | ||
49 | |||
28 | /* | 50 | /* |
29 | * This is the standard structure used to identify the entry point | 51 | * This is the standard structure used to identify the entry point |
30 | * to the BIOS32 Service Directory, as documented in | 52 | * to the BIOS32 Service Directory, as documented in |
@@ -332,6 +354,7 @@ static struct pci_raw_ops * __devinit pci_find_bios(void) | |||
332 | DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", | 354 | DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", |
333 | bios32_entry); | 355 | bios32_entry); |
334 | bios32_indirect.address = bios32_entry + PAGE_OFFSET; | 356 | bios32_indirect.address = bios32_entry + PAGE_OFFSET; |
357 | set_bios_x(); | ||
335 | if (check_pcibios()) | 358 | if (check_pcibios()) |
336 | return &pci_bios_access; | 359 | return &pci_bios_access; |
337 | } | 360 | } |
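
set_bios_x() flips the whole legacy BIOS window to executable. Assuming the usual x86 values BIOS_BEGIN = 0xa0000, BIOS_END = 0x100000 and PAGE_SHIFT = 12 (none of which appear in the hunk), the page count passed to set_memory_x() works out like this:

    #include <stdio.h>

    #define BIOS_BEGIN 0x000a0000   /* assumed, as on x86 */
    #define BIOS_END   0x00100000   /* assumed */
    #define PAGE_SHIFT 12

    int main(void)
    {
        /* set_memory_x() is handed this many 4 KiB pages: 0x60000 >> 12 = 96 */
        printf("%d pages\n", (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
        return 0;
    }
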
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
new file mode 100644
index 000000000000..f567965c0620
--- /dev/null
+++ b/arch/x86/pci/xen.c
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * Xen PCI Frontend Stub - puts some "dummy" functions into the Linux | ||
3 | * x86 PCI core to support the Xen PCI Frontend | ||
4 | * | ||
5 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | ||
6 | */ | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/acpi.h> | ||
11 | |||
12 | #include <linux/io.h> | ||
13 | #include <asm/io_apic.h> | ||
14 | #include <asm/pci_x86.h> | ||
15 | |||
16 | #include <asm/xen/hypervisor.h> | ||
17 | |||
18 | #include <xen/features.h> | ||
19 | #include <xen/events.h> | ||
20 | #include <asm/xen/pci.h> | ||
21 | |||
22 | #ifdef CONFIG_ACPI | ||
23 | static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, | ||
24 | int trigger, int polarity) | ||
25 | { | ||
26 | int rc, irq; | ||
27 | struct physdev_map_pirq map_irq; | ||
28 | int shareable = 0; | ||
29 | char *name; | ||
30 | |||
31 | if (!xen_hvm_domain()) | ||
32 | return -1; | ||
33 | |||
34 | map_irq.domid = DOMID_SELF; | ||
35 | map_irq.type = MAP_PIRQ_TYPE_GSI; | ||
36 | map_irq.index = gsi; | ||
37 | map_irq.pirq = -1; | ||
38 | |||
39 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | ||
40 | if (rc) { | ||
41 | printk(KERN_WARNING "xen map irq failed %d\n", rc); | ||
42 | return -1; | ||
43 | } | ||
44 | |||
45 | if (trigger == ACPI_EDGE_SENSITIVE) { | ||
46 | shareable = 0; | ||
47 | name = "ioapic-edge"; | ||
48 | } else { | ||
49 | shareable = 1; | ||
50 | name = "ioapic-level"; | ||
51 | } | ||
52 | |||
53 | irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name); | ||
54 | |||
55 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); | ||
56 | |||
57 | return irq; | ||
58 | } | ||
59 | #endif | ||
60 | |||
61 | #if defined(CONFIG_PCI_MSI) | ||
62 | #include <linux/msi.h> | ||
63 | #include <asm/msidef.h> | ||
64 | |||
65 | struct xen_pci_frontend_ops *xen_pci_frontend; | ||
66 | EXPORT_SYMBOL_GPL(xen_pci_frontend); | ||
67 | |||
68 | #define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \ | ||
69 | MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0)) | ||
70 | |||
71 | static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, | ||
72 | struct msi_msg *msg) | ||
73 | { | ||
74 | /* We set vector == 0 to tell the hypervisor we don't care about it, | ||
75 | * but we want a pirq set up instead. | ||
76 | * We use the dest_id field to pass the pirq that we want. */ | ||
77 | msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq); | ||
78 | msg->address_lo = | ||
79 | MSI_ADDR_BASE_LO | | ||
80 | MSI_ADDR_DEST_MODE_PHYSICAL | | ||
81 | MSI_ADDR_REDIRECTION_CPU | | ||
82 | MSI_ADDR_DEST_ID(pirq); | ||
83 | |||
84 | msg->data = XEN_PIRQ_MSI_DATA; | ||
85 | } | ||
86 | |||
87 | static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
88 | { | ||
89 | int irq, pirq; | ||
90 | struct msi_desc *msidesc; | ||
91 | struct msi_msg msg; | ||
92 | |||
93 | list_for_each_entry(msidesc, &dev->msi_list, list) { | ||
94 | __read_msi_msg(msidesc, &msg); | ||
95 | pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | | ||
96 | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); | ||
97 | if (msg.data != XEN_PIRQ_MSI_DATA || | ||
98 | xen_irq_from_pirq(pirq) < 0) { | ||
99 | pirq = xen_allocate_pirq_msi(dev, msidesc); | ||
100 | if (pirq < 0) | ||
101 | goto error; | ||
102 | xen_msi_compose_msg(dev, pirq, &msg); | ||
103 | __write_msi_msg(msidesc, &msg); | ||
104 | dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); | ||
105 | } else { | ||
106 | dev_dbg(&dev->dev, | ||
107 | "xen: msi already bound to pirq=%d\n", pirq); | ||
108 | } | ||
109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, | ||
110 | (type == PCI_CAP_ID_MSIX) ? | ||
111 | "msi-x" : "msi", | ||
112 | DOMID_SELF); | ||
113 | if (irq < 0) | ||
114 | goto error; | ||
115 | dev_dbg(&dev->dev, | ||
116 | "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); | ||
117 | } | ||
118 | return 0; | ||
119 | |||
120 | error: | ||
121 | dev_err(&dev->dev, | ||
122 | "Xen PCI frontend has not registered MSI/MSI-X support!\n"); | ||
123 | return -ENODEV; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * For MSI interrupts we have to use the drivers/xen/events.c functions | ||
128 | * to allocate an irq_desc and set up the right binding. */ | ||
129 | |||
130 | |||
131 | static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
132 | { | ||
133 | int irq, ret, i; | ||
134 | struct msi_desc *msidesc; | ||
135 | int *v; | ||
136 | |||
137 | v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); | ||
138 | if (!v) | ||
139 | return -ENOMEM; | ||
140 | |||
141 | if (type == PCI_CAP_ID_MSIX) | ||
142 | ret = xen_pci_frontend_enable_msix(dev, v, nvec); | ||
143 | else | ||
144 | ret = xen_pci_frontend_enable_msi(dev, v); | ||
145 | if (ret) | ||
146 | goto error; | ||
147 | i = 0; | ||
148 | list_for_each_entry(msidesc, &dev->msi_list, list) { | ||
149 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, | ||
150 | (type == PCI_CAP_ID_MSIX) ? | ||
151 | "pcifront-msi-x" : | ||
152 | "pcifront-msi", | ||
153 | DOMID_SELF); | ||
154 | if (irq < 0) | ||
155 | goto free; | ||
156 | i++; | ||
157 | } | ||
158 | kfree(v); | ||
159 | return 0; | ||
160 | |||
161 | error: | ||
162 | dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); | ||
163 | free: | ||
164 | kfree(v); | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | static void xen_teardown_msi_irqs(struct pci_dev *dev) | ||
169 | { | ||
170 | struct msi_desc *msidesc; | ||
171 | |||
172 | msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); | ||
173 | if (msidesc->msi_attrib.is_msix) | ||
174 | xen_pci_frontend_disable_msix(dev); | ||
175 | else | ||
176 | xen_pci_frontend_disable_msi(dev); | ||
177 | |||
178 | /* Free the IRQ's and the msidesc using the generic code. */ | ||
179 | default_teardown_msi_irqs(dev); | ||
180 | } | ||
181 | |||
182 | static void xen_teardown_msi_irq(unsigned int irq) | ||
183 | { | ||
184 | xen_destroy_irq(irq); | ||
185 | } | ||
186 | |||
187 | #ifdef CONFIG_XEN_DOM0 | ||
188 | static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
189 | { | ||
190 | int ret = 0; | ||
191 | struct msi_desc *msidesc; | ||
192 | |||
193 | list_for_each_entry(msidesc, &dev->msi_list, list) { | ||
194 | struct physdev_map_pirq map_irq; | ||
195 | domid_t domid; | ||
196 | |||
197 | domid = ret = xen_find_device_domain_owner(dev); | ||
198 | /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED, | ||
199 | * hence check ret value for < 0. */ | ||
200 | if (ret < 0) | ||
201 | domid = DOMID_SELF; | ||
202 | |||
203 | memset(&map_irq, 0, sizeof(map_irq)); | ||
204 | map_irq.domid = domid; | ||
205 | map_irq.type = MAP_PIRQ_TYPE_MSI; | ||
206 | map_irq.index = -1; | ||
207 | map_irq.pirq = -1; | ||
208 | map_irq.bus = dev->bus->number; | ||
209 | map_irq.devfn = dev->devfn; | ||
210 | |||
211 | if (type == PCI_CAP_ID_MSIX) { | ||
212 | int pos; | ||
213 | u32 table_offset, bir; | ||
214 | |||
215 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
216 | |||
217 | pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, | ||
218 | &table_offset); | ||
219 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
220 | |||
221 | map_irq.table_base = pci_resource_start(dev, bir); | ||
222 | map_irq.entry_nr = msidesc->msi_attrib.entry_nr; | ||
223 | } | ||
224 | |||
225 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | ||
226 | if (ret) { | ||
227 | dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n", | ||
228 | ret, domid); | ||
229 | goto out; | ||
230 | } | ||
231 | |||
232 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, | ||
233 | map_irq.pirq, map_irq.index, | ||
234 | (type == PCI_CAP_ID_MSIX) ? | ||
235 | "msi-x" : "msi", | ||
236 | domid); | ||
237 | if (ret < 0) | ||
238 | goto out; | ||
239 | } | ||
240 | ret = 0; | ||
241 | out: | ||
242 | return ret; | ||
243 | } | ||
244 | #endif | ||
245 | #endif | ||
246 | |||
247 | static int xen_pcifront_enable_irq(struct pci_dev *dev) | ||
248 | { | ||
249 | int rc; | ||
250 | int share = 1; | ||
251 | int pirq; | ||
252 | u8 gsi; | ||
253 | |||
254 | rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); | ||
255 | if (rc < 0) { | ||
256 | dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", | ||
257 | rc); | ||
258 | return rc; | ||
259 | } | ||
260 | |||
261 | rc = xen_allocate_pirq_gsi(gsi); | ||
262 | if (rc < 0) { | ||
263 | dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n", | ||
264 | gsi, rc); | ||
265 | return rc; | ||
266 | } | ||
267 | pirq = rc; | ||
268 | |||
269 | if (gsi < NR_IRQS_LEGACY) | ||
270 | share = 0; | ||
271 | |||
272 | rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront"); | ||
273 | if (rc < 0) { | ||
274 | dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n", | ||
275 | gsi, pirq, rc); | ||
276 | return rc; | ||
277 | } | ||
278 | |||
279 | dev->irq = rc; | ||
280 | dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); | ||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | int __init pci_xen_init(void) | ||
285 | { | ||
286 | if (!xen_pv_domain() || xen_initial_domain()) | ||
287 | return -ENODEV; | ||
288 | |||
289 | printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n"); | ||
290 | |||
291 | pcibios_set_cache_line_size(); | ||
292 | |||
293 | pcibios_enable_irq = xen_pcifront_enable_irq; | ||
294 | pcibios_disable_irq = NULL; | ||
295 | |||
296 | #ifdef CONFIG_ACPI | ||
297 | /* Keep ACPI out of the picture */ | ||
298 | acpi_noirq = 1; | ||
299 | #endif | ||
300 | |||
301 | #ifdef CONFIG_PCI_MSI | ||
302 | x86_msi.setup_msi_irqs = xen_setup_msi_irqs; | ||
303 | x86_msi.teardown_msi_irq = xen_teardown_msi_irq; | ||
304 | x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; | ||
305 | #endif | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | int __init pci_xen_hvm_init(void) | ||
310 | { | ||
311 | if (!xen_feature(XENFEAT_hvm_pirqs)) | ||
312 | return 0; | ||
313 | |||
314 | #ifdef CONFIG_ACPI | ||
315 | /* | ||
316 | * We don't want to change the actual ACPI delivery model, | ||
317 | * just how GSIs get registered. | ||
318 | */ | ||
319 | __acpi_register_gsi = acpi_register_gsi_xen_hvm; | ||
320 | #endif | ||
321 | |||
322 | #ifdef CONFIG_PCI_MSI | ||
323 | x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; | ||
324 | x86_msi.teardown_msi_irq = xen_teardown_msi_irq; | ||
325 | #endif | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | #ifdef CONFIG_XEN_DOM0 | ||
330 | static int xen_register_pirq(u32 gsi, int gsi_override, int triggering) | ||
331 | { | ||
332 | int rc, pirq, irq = -1; | ||
333 | struct physdev_map_pirq map_irq; | ||
334 | int shareable = 0; | ||
335 | char *name; | ||
336 | |||
337 | if (!xen_pv_domain()) | ||
338 | return -1; | ||
339 | |||
340 | if (triggering == ACPI_EDGE_SENSITIVE) { | ||
341 | shareable = 0; | ||
342 | name = "ioapic-edge"; | ||
343 | } else { | ||
344 | shareable = 1; | ||
345 | name = "ioapic-level"; | ||
346 | } | ||
347 | pirq = xen_allocate_pirq_gsi(gsi); | ||
348 | if (pirq < 0) | ||
349 | goto out; | ||
350 | |||
351 | if (gsi_override >= 0) | ||
352 | irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name); | ||
353 | else | ||
354 | irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name); | ||
355 | if (irq < 0) | ||
356 | goto out; | ||
357 | |||
358 | printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi); | ||
359 | |||
360 | map_irq.domid = DOMID_SELF; | ||
361 | map_irq.type = MAP_PIRQ_TYPE_GSI; | ||
362 | map_irq.index = gsi; | ||
363 | map_irq.pirq = pirq; | ||
364 | |||
365 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | ||
366 | if (rc) { | ||
367 | printk(KERN_WARNING "xen map irq failed %d\n", rc); | ||
368 | return -1; | ||
369 | } | ||
370 | |||
371 | out: | ||
372 | return irq; | ||
373 | } | ||
374 | |||
375 | static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity) | ||
376 | { | ||
377 | int rc, irq; | ||
378 | struct physdev_setup_gsi setup_gsi; | ||
379 | |||
380 | if (!xen_pv_domain()) | ||
381 | return -1; | ||
382 | |||
383 | printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", | ||
384 | gsi, triggering, polarity); | ||
385 | |||
386 | irq = xen_register_pirq(gsi, gsi_override, triggering); | ||
387 | |||
388 | setup_gsi.gsi = gsi; | ||
389 | setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); | ||
390 | setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1); | ||
391 | |||
392 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); | ||
393 | if (rc == -EEXIST) | ||
393 | printk(KERN_INFO "Already set up GSI %d\n", gsi); | ||
395 | else if (rc) { | ||
396 | printk(KERN_ERR "Failed to set up GSI %d, err_code: %d\n", | ||
397 | gsi, rc); | ||
398 | } | ||
399 | |||
400 | return irq; | ||
401 | } | ||
402 | |||
403 | static __init void xen_setup_acpi_sci(void) | ||
404 | { | ||
405 | int rc; | ||
406 | int trigger, polarity; | ||
407 | int gsi = acpi_sci_override_gsi; | ||
408 | int irq = -1; | ||
409 | int gsi_override = -1; | ||
410 | |||
411 | if (!gsi) | ||
412 | return; | ||
413 | |||
414 | rc = acpi_get_override_irq(gsi, &trigger, &polarity); | ||
415 | if (rc) { | ||
416 | printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi" | ||
417 | " sci, rc=%d\n", rc); | ||
418 | return; | ||
419 | } | ||
420 | trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; | ||
421 | polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | ||
422 | |||
423 | printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d " | ||
424 | "polarity=%d\n", gsi, trigger, polarity); | ||
425 | |||
426 | /* Before we bind the GSI to a Linux IRQ, check whether | ||
427 | * we need to override it with bus_irq (IRQ) value. Usually for | ||
428 | * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, like so: | ||
429 | * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level) | ||
430 | * but there are oddballs where the IRQ != GSI: | ||
431 | * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level) | ||
432 | * which ends up being: gsi_to_irq[9] == 20 | ||
433 | * (which is what acpi_gsi_to_irq ends up calling when starting | ||
434 | * the ACPI interpreter, and it keels over since IRQ 9 has not been | ||
435 | * set up, as we had set up IRQ 20 for it instead). | ||
436 | */ | ||
437 | /* Check whether the GSI != IRQ */ | ||
438 | if (acpi_gsi_to_irq(gsi, &irq) == 0) { | ||
439 | if (irq >= 0 && irq != gsi) | ||
440 | /* Bugger, we MUST have that IRQ. */ | ||
441 | gsi_override = irq; | ||
442 | } | ||
443 | |||
444 | gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity); | ||
445 | printk(KERN_INFO "xen: acpi sci %d\n", gsi); | ||
446 | |||
447 | return; | ||
448 | } | ||
449 | |||
450 | static int acpi_register_gsi_xen(struct device *dev, u32 gsi, | ||
451 | int trigger, int polarity) | ||
452 | { | ||
453 | return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity); | ||
454 | } | ||
455 | |||
456 | static int __init pci_xen_initial_domain(void) | ||
457 | { | ||
458 | #ifdef CONFIG_PCI_MSI | ||
459 | x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; | ||
460 | x86_msi.teardown_msi_irq = xen_teardown_msi_irq; | ||
461 | #endif | ||
462 | xen_setup_acpi_sci(); | ||
463 | __acpi_register_gsi = acpi_register_gsi_xen; | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | void __init xen_setup_pirqs(void) | ||
469 | { | ||
470 | int pirq, irq; | ||
471 | |||
472 | pci_xen_initial_domain(); | ||
473 | |||
474 | if (0 == nr_ioapics) { | ||
475 | for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { | ||
476 | pirq = xen_allocate_pirq_gsi(irq); | ||
477 | if (WARN(pirq < 0, | ||
478 | "Could not allocate PIRQ for legacy interrupt\n")) | ||
479 | break; | ||
480 | irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic"); | ||
481 | } | ||
482 | return; | ||
483 | } | ||
484 | |||
485 | /* Pre-allocate legacy irqs */ | ||
486 | for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { | ||
487 | int trigger, polarity; | ||
488 | |||
489 | if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) | ||
490 | continue; | ||
491 | |||
492 | xen_register_pirq(irq, -1 /* no GSI override */, | ||
493 | trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE); | ||
494 | } | ||
495 | } | ||
496 | #endif | ||
497 | |||
498 | #ifdef CONFIG_XEN_DOM0 | ||
499 | struct xen_device_domain_owner { | ||
500 | domid_t domain; | ||
501 | struct pci_dev *dev; | ||
502 | struct list_head list; | ||
503 | }; | ||
504 | |||
505 | static DEFINE_SPINLOCK(dev_domain_list_spinlock); | ||
506 | static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); | ||
507 | |||
508 | static struct xen_device_domain_owner *find_device(struct pci_dev *dev) | ||
509 | { | ||
510 | struct xen_device_domain_owner *owner; | ||
511 | |||
512 | list_for_each_entry(owner, &dev_domain_list, list) { | ||
513 | if (owner->dev == dev) | ||
514 | return owner; | ||
515 | } | ||
516 | return NULL; | ||
517 | } | ||
518 | |||
519 | int xen_find_device_domain_owner(struct pci_dev *dev) | ||
520 | { | ||
521 | struct xen_device_domain_owner *owner; | ||
522 | int domain = -ENODEV; | ||
523 | |||
524 | spin_lock(&dev_domain_list_spinlock); | ||
525 | owner = find_device(dev); | ||
526 | if (owner) | ||
527 | domain = owner->domain; | ||
528 | spin_unlock(&dev_domain_list_spinlock); | ||
529 | return domain; | ||
530 | } | ||
531 | EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); | ||
532 | |||
533 | int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) | ||
534 | { | ||
535 | struct xen_device_domain_owner *owner; | ||
536 | |||
537 | owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); | ||
538 | if (!owner) | ||
539 | return -ENODEV; | ||
540 | |||
541 | spin_lock(&dev_domain_list_spinlock); | ||
542 | if (find_device(dev)) { | ||
543 | spin_unlock(&dev_domain_list_spinlock); | ||
544 | kfree(owner); | ||
545 | return -EEXIST; | ||
546 | } | ||
547 | owner->domain = domain; | ||
548 | owner->dev = dev; | ||
549 | list_add_tail(&owner->list, &dev_domain_list); | ||
550 | spin_unlock(&dev_domain_list_spinlock); | ||
551 | return 0; | ||
552 | } | ||
553 | EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); | ||
554 | |||
555 | int xen_unregister_device_domain_owner(struct pci_dev *dev) | ||
556 | { | ||
557 | struct xen_device_domain_owner *owner; | ||
558 | |||
559 | spin_lock(&dev_domain_list_spinlock); | ||
560 | owner = find_device(dev); | ||
561 | if (!owner) { | ||
562 | spin_unlock(&dev_domain_list_spinlock); | ||
563 | return -ENODEV; | ||
564 | } | ||
565 | list_del(&owner->list); | ||
566 | spin_unlock(&dev_domain_list_spinlock); | ||
567 | kfree(owner); | ||
568 | return 0; | ||
569 | } | ||
570 | EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); | ||
571 | #endif | ||
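
xen_msi_compose_msg() hides the PIRQ number in the MSI address (the low 8 bits in the dest-id field, the rest in the upper address word), and xen_hvm_setup_msi_irqs() recovers it with the expression shown above. A standalone round-trip sketch; the MSI_ADDR_* definitions below follow the usual x86 msidef.h layout and are assumptions rather than values taken from this patch:

    #include <stdio.h>

    /* assumed x86 MSI address layout (asm/msidef.h), not taken from the patch */
    #define MSI_ADDR_BASE_LO        0xfee00000u
    #define MSI_ADDR_DEST_ID_SHIFT  12
    #define MSI_ADDR_DEST_ID(d)     (((d) << MSI_ADDR_DEST_ID_SHIFT) & 0x000ff000u)
    #define MSI_ADDR_EXT_DEST_ID(d) ((d) & 0xffffff00u)

    int main(void)
    {
        unsigned int pirq = 0x1234;             /* made-up PIRQ number */

        /* encode, as xen_msi_compose_msg() does */
        unsigned int address_hi = MSI_ADDR_EXT_DEST_ID(pirq);
        unsigned int address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(pirq);

        /* decode, as xen_hvm_setup_msi_irqs() does */
        unsigned int decoded = MSI_ADDR_EXT_DEST_ID(address_hi) |
                               ((address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);

        printf("pirq %#x -> decoded %#x\n", pirq, decoded);   /* both 0x1234 */
        return 0;
    }
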