author     Thomas Gleixner <tglx@linutronix.de>   2010-12-09 12:17:25 -0500
committer  Thomas Gleixner <tglx@linutronix.de>   2010-12-09 12:17:25 -0500
commit     d834a9dcecae834cd6b2bc5e50e1907738d9cf6a (patch)
tree       0589d753465d3fe359ba451ba6cb7798df03aaa2 /arch/x86/pci
parent     a38c5380ef9f088be9f49b6e4c5d80af8b1b5cd4 (diff)
parent     f658bcfb2607bf0808966a69cf74135ce98e5c2d (diff)
Merge branch 'x86/amd-nb' into x86/apic-cleanups
Reason: apic cleanup series depends on x86/apic, x86/amd-nb and x86/platform

Conflicts:
        arch/x86/include/asm/io_apic.h
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/pci')
-rw-r--r--  arch/x86/pci/Makefile          |   1
-rw-r--r--  arch/x86/pci/acpi.c            | 103
-rw-r--r--  arch/x86/pci/common.c          |  17
-rw-r--r--  arch/x86/pci/i386.c            |  19
-rw-r--r--  arch/x86/pci/irq.c             |  11
-rw-r--r--  arch/x86/pci/mmconfig-shared.c |   4
-rw-r--r--  arch/x86/pci/xen.c             | 416
7 files changed, 530 insertions, 41 deletions
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index a0207a7fdf39..effd96e33f16 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
 obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o
 obj-$(CONFIG_PCI_DIRECT)   += direct.o
 obj-$(CONFIG_PCI_OLPC)     += olpc.o
+obj-$(CONFIG_PCI_XEN)      += xen.o
 
 obj-y                      += fixup.o
 obj-$(CONFIG_ACPI)         += acpi.o
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 15466c096ba5..0972315c3860 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
         struct acpi_resource_address64 addr;
         acpi_status status;
         unsigned long flags;
-        struct resource *root, *conflict;
         u64 start, end;
 
         status = resource_to_addr(acpi_res, &addr);
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
                 return AE_OK;
 
         if (addr.resource_type == ACPI_MEMORY_RANGE) {
-                root = &iomem_resource;
                 flags = IORESOURCE_MEM;
                 if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
                         flags |= IORESOURCE_PREFETCH;
         } else if (addr.resource_type == ACPI_IO_RANGE) {
-                root = &ioport_resource;
                 flags = IORESOURCE_IO;
         } else
                 return AE_OK;
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
                 return AE_OK;
         }
 
-        conflict = insert_resource_conflict(root, res);
-        if (conflict) {
-                dev_err(&info->bridge->dev,
-                        "address space collision: host bridge window %pR "
-                        "conflicts with %s %pR\n",
-                        res, conflict->name, conflict);
-        } else {
-                pci_bus_add_resource(info->bus, res, 0);
-                info->res_num++;
-                if (addr.translation_offset)
-                        dev_info(&info->bridge->dev, "host bridge window %pR "
-                                 "(PCI address [%#llx-%#llx])\n",
-                                 res, res->start - addr.translation_offset,
-                                 res->end - addr.translation_offset);
+        info->res_num++;
+        if (addr.translation_offset)
+                dev_info(&info->bridge->dev, "host bridge window %pR "
+                         "(PCI address [%#llx-%#llx])\n",
+                         res, res->start - addr.translation_offset,
+                         res->end - addr.translation_offset);
+        else
+                dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
+
+        return AE_OK;
+}
+
+static bool resource_contains(struct resource *res, resource_size_t point)
+{
+        if (res->start <= point && point <= res->end)
+                return true;
+        return false;
+}
+
+static void coalesce_windows(struct pci_root_info *info, int type)
+{
+        int i, j;
+        struct resource *res1, *res2;
+
+        for (i = 0; i < info->res_num; i++) {
+                res1 = &info->res[i];
+                if (!(res1->flags & type))
+                        continue;
+
+                for (j = i + 1; j < info->res_num; j++) {
+                        res2 = &info->res[j];
+                        if (!(res2->flags & type))
+                                continue;
+
+                        /*
+                         * I don't like throwing away windows because then
+                         * our resources no longer match the ACPI _CRS, but
+                         * the kernel resource tree doesn't allow overlaps.
+                         */
+                        if (resource_contains(res1, res2->start) ||
+                            resource_contains(res1, res2->end) ||
+                            resource_contains(res2, res1->start) ||
+                            resource_contains(res2, res1->end)) {
+                                res1->start = min(res1->start, res2->start);
+                                res1->end = max(res1->end, res2->end);
+                                dev_info(&info->bridge->dev,
+                                         "host bridge window expanded to %pR; %pR ignored\n",
+                                         res1, res2);
+                                res2->flags = 0;
+                        }
+                }
+        }
+}
+
+static void add_resources(struct pci_root_info *info)
+{
+        int i;
+        struct resource *res, *root, *conflict;
+
+        if (!pci_use_crs)
+                return;
+
+        coalesce_windows(info, IORESOURCE_MEM);
+        coalesce_windows(info, IORESOURCE_IO);
+
+        for (i = 0; i < info->res_num; i++) {
+                res = &info->res[i];
+
+                if (res->flags & IORESOURCE_MEM)
+                        root = &iomem_resource;
+                else if (res->flags & IORESOURCE_IO)
+                        root = &ioport_resource;
                 else
-                        dev_info(&info->bridge->dev,
-                                 "host bridge window %pR\n", res);
+                        continue;
+
+                conflict = insert_resource_conflict(root, res);
+                if (conflict)
+                        dev_err(&info->bridge->dev,
+                                "address space collision: host bridge window %pR "
+                                "conflicts with %s %pR\n",
+                                res, conflict->name, conflict);
+                else
+                        pci_bus_add_resource(info->bus, res, 0);
         }
-        return AE_OK;
 }
 
 static void
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum,
         acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                                 &info);
 
+        add_resources(&info);
         return;
 
 name_alloc_fail:
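The coalescing logic above merges any two same-type _CRS windows that overlap, because the kernel resource tree rejects overlapping insertions: the surviving window grows to the union of the pair and the duplicate is dropped by clearing its flags. A minimal standalone sketch of that interval-union rule, using hypothetical window addresses (not kernel code):

/* Two windows overlap if either one contains an endpoint of the other;
 * the first window absorbs the second, which is then ignored. */
#include <stdio.h>

struct win { unsigned long start, end, flags; };

static int contains(const struct win *w, unsigned long p)
{
        return w->start <= p && p <= w->end;
}

int main(void)
{
        struct win a = { 0xa0000000UL, 0xbfffffffUL, 1 };  /* hypothetical MEM window  */
        struct win b = { 0xb0000000UL, 0xcfffffffUL, 1 };  /* overlaps the tail of 'a' */

        if (contains(&a, b.start) || contains(&a, b.end) ||
            contains(&b, a.start) || contains(&b, a.end)) {
                a.start = a.start < b.start ? a.start : b.start;
                a.end   = a.end   > b.end   ? a.end   : b.end;
                b.flags = 0;                               /* window 'b' is dropped */
        }

        printf("expanded window: [%#lx-%#lx]\n", a.start, a.end);
        return 0;
}

With the values above this prints [0xa0000000-0xcfffffff], matching the "host bridge window expanded" message the patch logs.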
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index a0772af64efb..f7c8a399978c 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -421,16 +421,10 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
 
         return bus;
 }
-
-int __init pcibios_init(void)
+void __init pcibios_set_cache_line_size(void)
 {
         struct cpuinfo_x86 *c = &boot_cpu_data;
 
-        if (!raw_pci_ops) {
-                printk(KERN_WARNING "PCI: System does not support PCI\n");
-                return 0;
-        }
-
         /*
          * Set PCI cacheline size to that of the CPU if the CPU has reported it.
          * (For older CPUs that don't support cpuid, we se it to 32 bytes
@@ -445,7 +439,16 @@ int __init pcibios_init(void)
                 pci_dfl_cache_line_size = 32 >> 2;
                 printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
         }
+}
+
+int __init pcibios_init(void)
+{
+        if (!raw_pci_ops) {
+                printk(KERN_WARNING "PCI: System does not support PCI\n");
+                return 0;
+        }
 
+        pcibios_set_cache_line_size();
         pcibios_resource_survey();
 
         if (pci_bf_sort >= pci_force_bf)
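This change splits the cache-line-size setup out of pcibios_init() into pcibios_set_cache_line_size(), so an init path that never probes raw_pci_ops (the Xen frontend stub added below) can still apply the CPU-reported cache line size. A simplified, self-contained sketch of the resulting call structure; the names here are hypothetical stand-ins, not the kernel symbols:

#include <stdio.h>
#include <stdbool.h>

static int dfl_cache_line_size;

static void set_cache_line_size(void)           /* shared helper, split out */
{
        dfl_cache_line_size = 64 >> 2;          /* pretend CPUID reported 64 bytes */
}

static int generic_pci_init(bool have_raw_ops)  /* legacy path bails out early... */
{
        if (!have_raw_ops)
                return 0;
        set_cache_line_size();
        return 1;
}

static int xen_frontend_init(void)              /* ...so the Xen path reuses the helper */
{
        set_cache_line_size();
        return 1;
}

int main(void)
{
        generic_pci_init(false);
        xen_frontend_init();
        printf("cache line size (dwords): %d\n", dfl_cache_line_size);
        return 0;
}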
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 55253095be84..c4bb261c106e 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
                         resource_size_t size, resource_size_t align)
 {
         struct pci_dev *dev = data;
-        resource_size_t start = res->start;
+        resource_size_t start = round_down(res->end - size + 1, align);
 
         if (res->flags & IORESOURCE_IO) {
-                if (skip_isa_ioresource_align(dev))
-                        return start;
-                if (start & 0x300)
-                        start = (start + 0x3ff) & ~0x3ff;
+
+                /*
+                 * If we're avoiding ISA aliases, the largest contiguous I/O
+                 * port space is 256 bytes.  Clearing bits 9 and 10 preserves
+                 * all 256-byte and smaller alignments, so the result will
+                 * still be correctly aligned.
+                 */
+                if (!skip_isa_ioresource_align(dev))
+                        start &= ~0x300;
         } else if (res->flags & IORESOURCE_MEM) {
                 if (start < BIOS_END)
-                        start = BIOS_END;
+                        start = res->end;       /* fail; no space */
         }
         return start;
 }
@@ -311,6 +316,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
          */
         prot |= _PAGE_CACHE_UC_MINUS;
 
+        prot |= _PAGE_IOMAP;    /* creating a mapping for IO */
+
         vma->vm_page_prot = __pgprot(prot);
 
         if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
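pcibios_align_resource() now proposes the highest aligned start that still fits the request below the end of the window, and avoids ISA aliases by clearing the 0x300 bits instead of rounding up past 0x3ff. A worked example of the arithmetic with a hypothetical I/O window; round_down() here assumes a power-of-two alignment, as resource alignments are:

#include <stdio.h>

#define round_down(x, a)  ((x) & ~((a) - 1))

int main(void)
{
        unsigned long win_end = 0xffff;            /* hypothetical I/O window end */
        unsigned long size = 0x100, align = 0x100;

        /* Highest aligned start that still leaves 'size' bytes before win_end. */
        unsigned long start = round_down(win_end - size + 1, align);
        printf("candidate start: %#lx\n", start);  /* 0xff00 */

        /* ISA alias avoidance: clearing the 0x300 bits moves the candidate out
         * of the aliased x100-x3ff range of each 1K block while keeping every
         * alignment of 256 bytes or less intact. */
        start &= ~0x300UL;
        printf("after ISA fixup: %#lx\n", start);  /* 0xfc00 */
        return 0;
}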
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index f547ee05f715..9f9bfb705cf9 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
         case PCI_DEVICE_ID_INTEL_ICH9_3:
         case PCI_DEVICE_ID_INTEL_ICH9_4:
         case PCI_DEVICE_ID_INTEL_ICH9_5:
-        case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
+        case PCI_DEVICE_ID_INTEL_EP80579_0:
         case PCI_DEVICE_ID_INTEL_ICH10_0:
         case PCI_DEVICE_ID_INTEL_ICH10_1:
         case PCI_DEVICE_ID_INTEL_ICH10_2:
         case PCI_DEVICE_ID_INTEL_ICH10_3:
+        case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
                 r->name = "PIIX/ICH";
                 r->get = pirq_piix_get;
                 r->set = pirq_piix_set;
                 return 1;
         }
 
-        if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
-                (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+        if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
+                (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
                 r->name = "PIIX/ICH";
                 r->get = pirq_piix_get;
                 r->set = pirq_piix_set;
                 return 1;
         }
 
-        if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
-                (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+        if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
+                (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
                 r->name = "PIIX/ICH";
                 r->get = pirq_piix_get;
                 r->set = pirq_piix_set;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index a918553ebc75..e282886616a0 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
                                                 int end, u64 addr)
 {
         struct pci_mmcfg_region *new;
-        int num_buses;
         struct resource *res;
 
         if (addr == 0)
@@ -82,10 +81,9 @@
 
         list_add_sorted(new);
 
-        num_buses = end - start + 1;
         res = &new->res;
         res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
-        res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+        res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
         res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
         snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
                  "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
new file mode 100644
index 000000000000..d7b5109f7a9c
--- /dev/null
+++ b/arch/x86/pci/xen.c
@@ -0,0 +1,416 @@
+/*
+ * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux
+ * x86 PCI core to support the Xen PCI Frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+#include <linux/io.h>
+#include <asm/io_apic.h>
+#include <asm/pci_x86.h>
+
+#include <asm/xen/hypervisor.h>
+
+#include <xen/features.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+
+#ifdef CONFIG_ACPI
+static int xen_hvm_register_pirq(u32 gsi, int triggering)
+{
+        int rc, irq;
+        struct physdev_map_pirq map_irq;
+        int shareable = 0;
+        char *name;
+
+        if (!xen_hvm_domain())
+                return -1;
+
+        map_irq.domid = DOMID_SELF;
+        map_irq.type = MAP_PIRQ_TYPE_GSI;
+        map_irq.index = gsi;
+        map_irq.pirq = -1;
+
+        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+        if (rc) {
+                printk(KERN_WARNING "xen map irq failed %d\n", rc);
+                return -1;
+        }
+
+        if (triggering == ACPI_EDGE_SENSITIVE) {
+                shareable = 0;
+                name = "ioapic-edge";
+        } else {
+                shareable = 1;
+                name = "ioapic-level";
+        }
+
+        irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);
+
+        printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+
+        return irq;
+}
+
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+                                     int trigger, int polarity)
+{
+        return xen_hvm_register_pirq(gsi, trigger);
+}
+#endif
+
+#if defined(CONFIG_PCI_MSI)
+#include <linux/msi.h>
+#include <asm/msidef.h>
+
+struct xen_pci_frontend_ops *xen_pci_frontend;
+EXPORT_SYMBOL_GPL(xen_pci_frontend);
+
+static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
+                struct msi_msg *msg)
+{
+        /* We set vector == 0 to tell the hypervisor we don't care about it,
+         * but we want a pirq setup instead.
+         * We use the dest_id field to pass the pirq that we want. */
+        msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
+        msg->address_lo =
+                MSI_ADDR_BASE_LO |
+                MSI_ADDR_DEST_MODE_PHYSICAL |
+                MSI_ADDR_REDIRECTION_CPU |
+                MSI_ADDR_DEST_ID(pirq);
+
+        msg->data =
+                MSI_DATA_TRIGGER_EDGE |
+                MSI_DATA_LEVEL_ASSERT |
+                /* delivery mode reserved */
+                (3 << 8) |
+                MSI_DATA_VECTOR(0);
+}
+
+static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+        int irq, pirq, ret = 0;
+        struct msi_desc *msidesc;
+        struct msi_msg msg;
+
+        list_for_each_entry(msidesc, &dev->msi_list, list) {
+                xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
+                                "msi-x" : "msi", &irq, &pirq);
+                if (irq < 0 || pirq < 0)
+                        goto error;
+                printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
+                xen_msi_compose_msg(dev, pirq, &msg);
+                ret = set_irq_msi(irq, msidesc);
+                if (ret < 0)
+                        goto error_while;
+                write_msi_msg(irq, &msg);
+        }
+        return 0;
+
+error_while:
+        unbind_from_irqhandler(irq, NULL);
+error:
+        if (ret == -ENODEV)
+                dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+                        " MSI/MSI-X support!\n");
+
+        return ret;
+}
+
+/*
+ * For MSI interrupts we have to use drivers/xen/event.s functions to
+ * allocate an irq_desc and setup the right */
+
+
+static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+        int irq, ret, i;
+        struct msi_desc *msidesc;
+        int *v;
+
+        v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+        if (!v)
+                return -ENOMEM;
+
+        if (type == PCI_CAP_ID_MSIX)
+                ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+        else
+                ret = xen_pci_frontend_enable_msi(dev, &v);
+        if (ret)
+                goto error;
+        i = 0;
+        list_for_each_entry(msidesc, &dev->msi_list, list) {
+                irq = xen_allocate_pirq(v[i], 0, /* not sharable */
+                        (type == PCI_CAP_ID_MSIX) ?
+                        "pcifront-msi-x" : "pcifront-msi");
+                if (irq < 0) {
+                        ret = -1;
+                        goto free;
+                }
+
+                ret = set_irq_msi(irq, msidesc);
+                if (ret)
+                        goto error_while;
+                i++;
+        }
+        kfree(v);
+        return 0;
+
+error_while:
+        unbind_from_irqhandler(irq, NULL);
+error:
+        if (ret == -ENODEV)
+                dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+                        " MSI/MSI-X support!\n");
+free:
+        kfree(v);
+        return ret;
+}
+
+static void xen_teardown_msi_irqs(struct pci_dev *dev)
+{
+        struct msi_desc *msidesc;
+
+        msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+        if (msidesc->msi_attrib.is_msix)
+                xen_pci_frontend_disable_msix(dev);
+        else
+                xen_pci_frontend_disable_msi(dev);
+}
+
+static void xen_teardown_msi_irq(unsigned int irq)
+{
+        xen_destroy_irq(irq);
+}
+
+static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+        int irq, ret;
+        struct msi_desc *msidesc;
+
+        list_for_each_entry(msidesc, &dev->msi_list, list) {
+                irq = xen_create_msi_irq(dev, msidesc, type);
+                if (irq < 0)
+                        return -1;
+
+                ret = set_irq_msi(irq, msidesc);
+                if (ret)
+                        goto error;
+        }
+        return 0;
+
+error:
+        xen_destroy_irq(irq);
+        return ret;
+}
+#endif
+
+static int xen_pcifront_enable_irq(struct pci_dev *dev)
+{
+        int rc;
+        int share = 1;
+
+        dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
+
+        if (dev->irq < 0)
+                return -EINVAL;
+
+        if (dev->irq < NR_IRQS_LEGACY)
+                share = 0;
+
+        rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+        if (rc < 0) {
+                dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
+                         dev->irq, rc);
+                return rc;
+        }
+        return 0;
+}
+
+int __init pci_xen_init(void)
+{
+        if (!xen_pv_domain() || xen_initial_domain())
+                return -ENODEV;
+
+        printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
+
+        pcibios_set_cache_line_size();
+
+        pcibios_enable_irq = xen_pcifront_enable_irq;
+        pcibios_disable_irq = NULL;
+
+#ifdef CONFIG_ACPI
+        /* Keep ACPI out of the picture */
+        acpi_noirq = 1;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+        x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
+        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+        x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
+#endif
+        return 0;
+}
+
+int __init pci_xen_hvm_init(void)
+{
+        if (!xen_feature(XENFEAT_hvm_pirqs))
+                return 0;
+
+#ifdef CONFIG_ACPI
+        /*
+         * We don't want to change the actual ACPI delivery model,
+         * just how GSIs get registered.
+         */
+        __acpi_register_gsi = acpi_register_gsi_xen_hvm;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+        x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+#endif
+        return 0;
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int xen_register_pirq(u32 gsi, int triggering)
+{
+        int rc, irq;
+        struct physdev_map_pirq map_irq;
+        int shareable = 0;
+        char *name;
+
+        if (!xen_pv_domain())
+                return -1;
+
+        if (triggering == ACPI_EDGE_SENSITIVE) {
+                shareable = 0;
+                name = "ioapic-edge";
+        } else {
+                shareable = 1;
+                name = "ioapic-level";
+        }
+
+        irq = xen_allocate_pirq(gsi, shareable, name);
+
+        printk(KERN_DEBUG "xen: --> irq=%d\n", irq);
+
+        if (irq < 0)
+                goto out;
+
+        map_irq.domid = DOMID_SELF;
+        map_irq.type = MAP_PIRQ_TYPE_GSI;
+        map_irq.index = gsi;
+        map_irq.pirq = irq;
+
+        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+        if (rc) {
+                printk(KERN_WARNING "xen map irq failed %d\n", rc);
+                return -1;
+        }
+
+out:
+        return irq;
+}
+
+static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+{
+        int rc, irq;
+        struct physdev_setup_gsi setup_gsi;
+
+        if (!xen_pv_domain())
+                return -1;
+
+        printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
+                        gsi, triggering, polarity);
+
+        irq = xen_register_pirq(gsi, triggering);
+
+        setup_gsi.gsi = gsi;
+        setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
+        setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+        rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+        if (rc == -EEXIST)
+                printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
+        else if (rc) {
+                printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
+                                gsi, rc);
+        }
+
+        return irq;
+}
+
+static __init void xen_setup_acpi_sci(void)
+{
+        int rc;
+        int trigger, polarity;
+        int gsi = acpi_sci_override_gsi;
+
+        if (!gsi)
+                return;
+
+        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+        if (rc) {
+                printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
+                                " sci, rc=%d\n", rc);
+                return;
+        }
+        trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+        polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+        printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
+                        "polarity=%d\n", gsi, trigger, polarity);
+
+        gsi = xen_register_gsi(gsi, trigger, polarity);
+        printk(KERN_INFO "xen: acpi sci %d\n", gsi);
+
+        return;
+}
+
+static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
+                                 int trigger, int polarity)
+{
+        return xen_register_gsi(gsi, trigger, polarity);
+}
+
+static int __init pci_xen_initial_domain(void)
+{
+#ifdef CONFIG_PCI_MSI
+        x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
+        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+#endif
+        xen_setup_acpi_sci();
+        __acpi_register_gsi = acpi_register_gsi_xen;
+
+        return 0;
+}
+
+void __init xen_setup_pirqs(void)
+{
+        int irq;
+
+        pci_xen_initial_domain();
+
+        if (0 == nr_ioapics) {
+                for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+                        xen_allocate_pirq(irq, 0, "xt-pic");
+                return;
+        }
+
+        /* Pre-allocate legacy irqs */
+        for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+                int trigger, polarity;
+
+                if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+                        continue;
+
+                xen_register_pirq(irq,
+                        trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+        }
+}
+#endif
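xen_register_gsi() in the new file folds ACPI's trigger and polarity values into the 0/1 encoding handed to PHYSDEVOP_setup_gsi: edge-triggered and active-high become 0, level-triggered and active-low become 1. A standalone sketch of just that conversion, using stand-in enum values rather than the real ACPICA constants:

#include <stdio.h>

enum acpi_trigger  { EDGE_SENSITIVE, LEVEL_SENSITIVE };   /* stand-ins */
enum acpi_polarity { ACTIVE_HIGH, ACTIVE_LOW };           /* stand-ins */

struct setup_gsi { unsigned gsi; unsigned triggering; unsigned polarity; };

static struct setup_gsi encode(unsigned gsi, enum acpi_trigger t, enum acpi_polarity p)
{
        struct setup_gsi s = {
                .gsi        = gsi,
                .triggering = (t == EDGE_SENSITIVE) ? 0 : 1,  /* 0 = edge, 1 = level */
                .polarity   = (p == ACTIVE_HIGH)    ? 0 : 1,  /* 0 = high, 1 = low   */
        };
        return s;
}

int main(void)
{
        /* The ACPI SCI is typically level-triggered and active-low. */
        struct setup_gsi s = encode(9, LEVEL_SENSITIVE, ACTIVE_LOW);
        printf("gsi=%u triggering=%u polarity=%u\n", s.gsi, s.triggering, s.polarity);
        return 0;
}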