author		Chris Metcalf <cmetcalf@tilera.com>	2012-04-07 17:10:17 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-07-18 16:39:11 -0400
commit		129622672d70711c6c844fb529381ff0dad9085a (patch)
tree		ea05d97ec3a457814e282c5cf8423c9e30994cb9 /arch/tile
parent		bce5bbbb23f780a792be7e594af7cd4b4aae1cd4 (diff)
arch/tile: tilegx PCI root complex support
This change implements PCIe root complex support for tilegx using the
kernel support layer for accessing the TRIO hardware shim.

Reviewed-by: Bjorn Helgaas <bhelgaas@google.com> [changes in 07487f3]
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
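
For reference, the probe behavior added here can be tuned from the boot
command line: "pci=off" (handled by pcibios_setup() in pci_gx.c) disables
the probe entirely, and "pcie_rc_delay=T,P,S" delays RC link training on
port P of TRIO T by S seconds for boards that need the link-training
workaround, e.g. "pcie_rc_delay=0,1,15" (illustrative values only).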
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/Kconfig		|    3
-rw-r--r--	arch/tile/include/asm/pci.h	|   98
-rw-r--r--	arch/tile/kernel/Makefile	|    4
-rw-r--r--	arch/tile/kernel/pci_gx.c	| 1544
-rw-r--r--	arch/tile/kernel/setup.c	|    6
-rw-r--r--	arch/tile/mm/pgtable.c		|    7
6 files changed, 1645 insertions(+), 17 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 645979cfb71..a5302d31922 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -356,6 +356,9 @@ config PCI
 	default y
 	select PCI_DOMAINS
 	select GENERIC_PCI_IOMAP
+	select TILE_GXIO_TRIO if TILEGX
+	select ARCH_SUPPORTS_MSI if TILEGX
+	select PCI_MSI if TILEGX
 	---help---
 	  Enable PCI root complex support, so PCIe endpoint devices can
 	  be attached to the Tile chip.  Many, but not all, PCI devices
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index 32e6cbe8dff..2c224c47d8a 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -16,8 +16,11 @@
 #define _ASM_TILE_PCI_H
 
 #include <linux/pci.h>
+#include <linux/numa.h>
 #include <asm-generic/pci_iomap.h>
 
+#ifndef __tilegx__
+
 /*
  * Structure of a PCI controller (host bridge)
  */
@@ -41,6 +44,91 @@ struct pci_controller {
 };
 
 /*
+ * This flag tells if the platform is TILEmpower that needs
+ * special configuration for the PLX switch chip.
+ */
+extern int tile_plx_gen1;
+
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
+
+#define TILE_NUM_PCIE	2
+
+#else
+
+#include <asm/page.h>
+#include <gxio/trio.h>
+
+/**
+ * We reserve the hugepage-size address range at the top of the 64-bit address
+ * space to serve as the PCI window, emulating the BAR0 space of an endpoint
+ * device.  This window is used by the chip-to-chip applications running on
+ * the RC node.  The reason for carving out this window is that Mem-Maps that
+ * back this window will not overlap with those that map the real physical
+ * memory.
+ */
+#define PCIE_HOST_BAR0_SIZE		HPAGE_SIZE
+#define PCIE_HOST_BAR0_START		HPAGE_MASK
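+/*
+ * (HPAGE_MASK, i.e. ~(HPAGE_SIZE - 1), read as an unsigned 64-bit value,
+ * is exactly the start address of the topmost huge page, so the two
+ * defines above describe one hugepage-sized window at the very top.)
+ */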
+
+/**
+ * The first PAGE_SIZE of the above "BAR" window is mapped to the
+ * gxpci_host_regs structure.
+ */
+#define PCIE_HOST_REGS_SIZE		PAGE_SIZE
+
+/*
+ * This is the PCI address where the Mem-Map interrupt regions start.
+ * We use the second-to-last huge page of the 64-bit address space;
+ * the last huge page is used for the root complex "bar", for C2C purposes.
+ */
+#define MEM_MAP_INTR_REGIONS_BASE	(HPAGE_MASK - HPAGE_SIZE)
+
+/*
+ * Each Mem-Map interrupt region occupies 4KB.
+ */
+#define MEM_MAP_INTR_REGION_SIZE	(1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT)
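+/* (So TRIO_MAP_MEM_LIM__ADDR_SHIFT is evidently 12: 1 << 12 = 4KB.) */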
+
+/*
+ * Structure of a PCI controller (host bridge) on Gx.
+ */
+struct pci_controller {
+
+	/* Pointer back to the TRIO that this PCIe port is connected to. */
+	gxio_trio_context_t *trio;
+
+	int mac;		/* PCIe mac index on the TRIO shim */
+	int trio_index;		/* Index of TRIO shim that contains the MAC. */
+
+	int pio_mem_index;	/* PIO region index for memory access */
+
+	/*
+	 * Mem-Map regions for all the memory controllers so that Linux can
+	 * map all of its physical memory space to the PCI bus.
+	 */
+	int mem_maps[MAX_NUMNODES];
+
+	int index;		/* PCI domain number */
+	struct pci_bus *root_bus;
+
+	int last_busno;
+
+	struct pci_ops *ops;
+
+	/* Table that maps the INTx numbers to Linux irq numbers. */
+	int irq_intx_table[4];
+
+	struct resource mem_space;
+
+	/* Address ranges that are routed to this controller/bridge. */
+	struct resource mem_resources[3];
+};
+
+extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
+extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
+
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+#endif /* __tilegx__ */
+
+/*
  * The hypervisor maps the entirety of CPA-space as bus addresses, so
  * bus addresses are physical addresses.  The networking and block
  * device layers use this boolean for bounce buffer decisions.
@@ -50,12 +138,8 @@ struct pci_controller {
 int __init tile_pci_init(void);
 int __init pcibios_init(void);
 
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
-
 void __devinit pcibios_fixup_bus(struct pci_bus *bus);
 
-#define TILE_NUM_PCIE	2
-
 #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)
 
 /*
@@ -79,12 +163,6 @@ static inline int pcibios_assign_all_busses(void)
 #define PCIBIOS_MIN_MEM		0
 #define PCIBIOS_MIN_IO		0
 
-/*
- * This flag tells if the platform is TILEmpower that needs
- * special configuration for the PLX switch chip.
- */
-extern int tile_plx_gen1;
-
 /* Use any cpu for PCI. */
 #define cpumask_of_pcibus(bus)		cpu_online_mask
 
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 5de99248d8d..49d4ce3cd7f 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -14,4 +14,8 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel_$(BITS).o
+ifdef CONFIG_TILEGX
+obj-$(CONFIG_PCI)		+= pci_gx.o
+else
 obj-$(CONFIG_PCI)		+= pci.o
+endif
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
new file mode 100644
index 00000000000..1b996bb628f
--- /dev/null
+++ b/arch/tile/kernel/pci_gx.c
@@ -0,0 +1,1544 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/byteorder.h>
+
+#include <gxio/iorpc_globals.h>
+#include <gxio/kiorpc.h>
+#include <gxio/trio.h>
+#include <gxio/iorpc_trio.h>
+#include <hv/drv_trio_intf.h>
+
+#include <arch/sim.h>
+
+/*
+ * Initialization flow and process
+ * -------------------------------
+ *
+ * This file contains the routines to search for PCI buses,
+ * enumerate the buses, and configure any attached devices.
+ *
+ * There are two entry points here:
+ * 1) tile_pci_init
+ *    This sets up the pci_controller structs, and opens the
+ *    FDs to the hypervisor.  This is called from setup_arch() early
+ *    in the boot process.
+ * 2) pcibios_init
+ *    This probes the PCI bus(es) for any attached hardware.  It's
+ *    called by subsys_initcall.  All of the real work is done by the
+ *    generic Linux PCI layer.
+ *
+ */
+
+#define DEBUG_PCI_CFG	0
+
+#if DEBUG_PCI_CFG
+#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
+	pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
+		size, val, bus, dev, func, offset & 0xFFF);
+#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
+	pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
+		size, val, bus, dev, func, offset & 0xFFF);
+#else
+#define TRACE_CFG_WR(...)
+#define TRACE_CFG_RD(...)
+#endif
+
+static int __devinitdata pci_probe = 1;
+
+/* Information on the PCIe RC ports configuration. */
+static int __devinitdata pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+
+/*
+ * On some platforms with one or more Gx endpoint ports, we need to
+ * delay the PCIe RC port probe for a few seconds to work around
+ * a HW PCIe link-training bug.  The exact delay is specified with
+ * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
+ * where T is the TRIO instance number, P is the port number and S is
+ * the delay in seconds.  If the delay is not provided, the value
+ * will be DEFAULT_RC_DELAY.
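+ * For example, "pcie_rc_delay=0,1,15" requests a 15-second delay for
+ * port 1 on TRIO instance 0 (the values here are purely illustrative).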
+ */
+static int __devinitdata rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+
+/* Default number of seconds that the PCIe RC port probe can be delayed. */
+#define DEFAULT_RC_DELAY	10
+
+/* Max number of seconds that the PCIe RC port probe can be delayed. */
+#define MAX_RC_DELAY		20
+
+/* Array of the PCIe ports configuration info obtained from the BIB. */
+struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+
+/* All drivers share the TRIO contexts defined here. */
+gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
+
+/* Array of the PCIe RC controllers. */
+struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
+int num_rc_controllers;
+static int num_ep_controllers;
+
+static struct pci_ops tile_cfg_ops;
+
+/* Mask of CPUs that should receive PCIe interrupts. */
+static struct cpumask intr_cpus_map;
+
+/*
+ * We don't need to worry about the alignment of resources.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				       resource_size_t size,
+				       resource_size_t align)
+{
+	return res->start;
+}
+EXPORT_SYMBOL(pcibios_align_resource);
+
+/*
+ * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
+ * For now, we simply send interrupts to non-dataplane CPUs.
+ * We may implement methods to allow the user to specify the target CPUs,
+ * e.g. via boot arguments.
+ */
+static int tile_irq_cpu(int irq)
+{
+	unsigned int count;
+	int i = 0;
+	int cpu;
+
+	count = cpumask_weight(&intr_cpus_map);
+	if (unlikely(count == 0)) {
+		pr_warning("intr_cpus_map empty, interrupts will be"
+			   " delivered to dataplane tiles\n");
+		return irq % (smp_height * smp_width);
+	}
+
+	count = irq % count;
+	for_each_cpu(cpu, &intr_cpus_map) {
+		if (i++ == count)
+			break;
+	}
+	return cpu;
+}
+
+/*
+ * Open a file descriptor to the TRIO shim.
+ */
+static int __devinit tile_pcie_open(int trio_index)
+{
+	gxio_trio_context_t *context = &trio_contexts[trio_index];
+	int ret;
+
+	/*
+	 * This opens a file descriptor to the TRIO shim.
+	 */
+	ret = gxio_trio_init(context, trio_index);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Allocate an ASID for the kernel.
+	 */
+	ret = gxio_trio_alloc_asids(context, 1, 0, 0);
+	if (ret < 0) {
+		pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
+		       trio_index);
+		goto asid_alloc_failure;
+	}
+
+	context->asid = ret;
+
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+	/*
+	 * Alloc a PIO region for config access, shared by all MACs per TRIO.
+	 * This shouldn't fail since the kernel is supposed to be the first
+	 * client of the TRIO's PIO regions.
+	 */
+	ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
+	if (ret < 0) {
+		pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
+		       trio_index);
+		goto pio_alloc_failure;
+	}
+
+	context->pio_cfg_index = ret;
+
+	/*
+	 * For PIO CFG, the bus_address_hi parameter is 0.  The mac parameter
+	 * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
+	 */
+	ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
+					    0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
+	if (ret < 0) {
+		pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
+		       trio_index);
+		goto pio_alloc_failure;
+	}
+#endif
+
+	return ret;
+
+asid_alloc_failure:
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+pio_alloc_failure:
+#endif
+	hv_dev_close(context->fd);
+
+	return ret;
+}
+
+static void
+tilegx_legacy_irq_ack(struct irq_data *d)
+{
+	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_legacy_irq_mask(struct irq_data *d)
+{
+	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_legacy_irq_unmask(struct irq_data *d)
+{
+	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
+}
+
+static struct irq_chip tilegx_legacy_irq_chip = {
+	.name		= "tilegx_legacy_irq",
+	.irq_ack	= tilegx_legacy_irq_ack,
+	.irq_mask	= tilegx_legacy_irq_mask,
+	.irq_unmask	= tilegx_legacy_irq_unmask,
+
+	/* TBD: support set_affinity. */
+};
+
+/*
+ * This is a wrapper function of the kernel level-trigger interrupt
+ * handler handle_level_irq() for PCI legacy interrupts.  The TRIO
+ * is configured such that only INTx Assert interrupts are proxied
+ * to Linux, which just calls handle_level_irq() after clearing the
+ * MAC INTx Assert status bit associated with this interrupt.
+ */
+static void
+trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct pci_controller *controller = irq_desc_get_handler_data(desc);
+	gxio_trio_context_t *trio_context = controller->trio;
+	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
+	int mac = controller->mac;
+	unsigned int reg_offset;
+	uint64_t level_mask;
+
+	handle_level_irq(irq, desc);
+
+	/*
+	 * Clear the INTx Level status, otherwise future interrupts are
+	 * not sent.
+	 */
+	reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
+		TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+	level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;
+
+	__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
+}
+
+/*
+ * Create kernel irqs and set up the handlers for the legacy interrupts.
+ * Also some minimum initialization for the MSI support.
+ */
+static int __devinit tile_init_irqs(struct pci_controller *controller)
+{
+	int i;
+	int j;
+	int irq;
+	int result;
+
+	cpumask_copy(&intr_cpus_map, cpu_online_mask);
+
+	for (i = 0; i < 4; i++) {
+		gxio_trio_context_t *context = controller->trio;
+		int cpu;
+
+		/* Ask the kernel to allocate an IRQ. */
+		irq = create_irq();
+		if (irq < 0) {
+			pr_err("PCI: no free irq vectors, failed for %d\n", i);
+			goto free_irqs;
+		}
+		controller->irq_intx_table[i] = irq;
+
+		/* Distribute the 4 IRQs to different tiles. */
+		cpu = tile_irq_cpu(irq);
+
+		/* Configure the TRIO intr binding for this IRQ. */
+		result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
+						      cpu_y(cpu), KERNEL_PL,
+						      irq, controller->mac, i);
+		if (result < 0) {
+			pr_err("PCI: MAC intx config failed for %d\n", i);
+			goto free_irqs;
+		}
+
+		/*
+		 * Register the IRQ handler with the kernel.
+		 */
+		irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
+					 trio_handle_level_irq);
+		irq_set_chip_data(irq, (void *)(uint64_t)i);
+		irq_set_handler_data(irq, controller);
+	}
+
+	return 0;
+
+free_irqs:
+	for (j = 0; j < i; j++)
+		destroy_irq(controller->irq_intx_table[j]);
+
+	return -1;
+}
+
+/*
+ * First initialization entry point, called from setup_arch().
+ *
+ * Find valid controllers and fill in pci_controller structs for each
+ * of them.
+ *
+ * Returns the number of controllers discovered.
+ */
+int __init tile_pci_init(void)
+{
+	int num_trio_shims = 0;
+	int ctl_index = 0;
+	int i, j;
+
+	if (!pci_probe) {
+		pr_info("PCI: disabled by boot argument\n");
+		return 0;
+	}
+
+	pr_info("PCI: Searching for controllers...\n");
+
+	/*
+	 * We loop over all the TRIO shims.
+	 */
+	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+		int ret;
+
+		ret = tile_pcie_open(i);
+		if (ret < 0)
+			continue;
+
+		num_trio_shims++;
+	}
+
+	if (num_trio_shims == 0 || sim_is_simulator())
+		return 0;
+
+	/*
+	 * Now determine which PCIe ports are configured to operate in RC
+	 * mode.  We look at the Board Information Block first and then see
+	 * if there is any overriding configuration by the HW strapping pin.
+	 */
+	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+		gxio_trio_context_t *context = &trio_contexts[i];
+		int ret;
+
+		if (context->fd < 0)
+			continue;
+
+		ret = hv_dev_pread(context->fd, 0,
+			(HV_VirtAddr)&pcie_ports[i][0],
+			sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
+			GXIO_TRIO_OP_GET_PORT_PROPERTY);
+		if (ret < 0) {
+			pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
+			       " on TRIO %d\n", ret, i);
+			continue;
+		}
+
+		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
+			if (pcie_ports[i][j].allow_rc) {
+				pcie_rc[i][j] = 1;
+				num_rc_controllers++;
+			} else if (pcie_ports[i][j].allow_ep) {
+				num_ep_controllers++;
+			}
+		}
+	}
+
+	/*
+	 * Return if no PCIe ports are configured to operate in RC mode.
+	 */
+	if (num_rc_controllers == 0)
+		return 0;
+
+	/*
+	 * Set the TRIO pointer and MAC index for each PCIe RC port.
+	 */
+	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
+			if (pcie_rc[i][j]) {
+				pci_controllers[ctl_index].trio =
+					&trio_contexts[i];
+				pci_controllers[ctl_index].mac = j;
+				pci_controllers[ctl_index].trio_index = i;
+				ctl_index++;
+				if (ctl_index == num_rc_controllers)
+					goto out;
+			}
+		}
+	}
+
+out:
+	/*
+	 * Configure each PCIe RC port.
+	 */
+	for (i = 0; i < num_rc_controllers; i++) {
+		/*
+		 * Configure the PCIe MAC to run in RC mode.
+		 */
+		struct pci_controller *controller = &pci_controllers[i];
+
+		controller->index = i;
+		controller->last_busno = 0xff;
+		controller->ops = &tile_cfg_ops;
+	}
+
+	return num_rc_controllers;
+}
+
+/*
+ * (pin - 1) converts from the PCI standard's [1:4] convention to
+ * a normal [0:3] range.
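+ * (So INTA, pin 1, lands in irq_intx_table[0], and so on.)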
+ */
+static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
+{
+	struct pci_controller *controller =
+		(struct pci_controller *)dev->sysdata;
+	return controller->irq_intx_table[pin - 1];
+}
+
+static void __devinit
+fixup_read_and_payload_sizes(struct pci_controller *controller)
+{
+	gxio_trio_context_t *trio_context = controller->trio;
+	struct pci_bus *root_bus = controller->root_bus;
+	TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
+	TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
+	unsigned int reg_offset;
+	struct pci_bus *child;
+	int mac;
+	int err;
+
+	mac = controller->mac;
+
+	/*
+	 * Set our max read request size to be 4KB.
+	 */
+	reg_offset =
+		(TRIO_PCIE_RC_DEVICE_CONTROL <<
+			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
+					      reg_offset);
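+	/* The field is coded per the PCIe spec: 128 << 5 = 4096 bytes. */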
+	dev_control.max_read_req_sz = 5;
+	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
+			    dev_control.word);
+
+	/*
+	 * Set the max payload size supported by this Gx PCIe MAC.
+	 * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
+	 * experiments have shown that setting MPS to 256 yields the
+	 * best performance.
+	 */
+	reg_offset =
+		(TRIO_PCIE_RC_DEVICE_CAP <<
+			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+	rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
+					     reg_offset);
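+	/* Likewise coded: 128 << 1 = 256-byte max payload size. */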
+	rc_dev_cap.mps_sup = 1;
+	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
+			    rc_dev_cap.word);
+
+	/* Configure PCI Express MPS setting. */
+	list_for_each_entry(child, &root_bus->children, node) {
+		struct pci_dev *self = child->self;
+		if (!self)
+			continue;
+
+		pcie_bus_configure_settings(child, self->pcie_mpss);
+	}
+
+	/*
+	 * Set the mac_config register in trio based on the MPS/MRS of the link.
+	 */
+	reg_offset =
+		(TRIO_PCIE_RC_DEVICE_CONTROL <<
+			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
+					      reg_offset);
+
+	err = gxio_trio_set_mps_mrs(trio_context,
+				    dev_control.max_payload_size,
+				    dev_control.max_read_req_sz,
+				    mac);
+	if (err < 0) {
+		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
+		       "MAC %d on TRIO %d\n",
+		       mac, controller->trio_index);
+	}
+}
+
+static int __devinit setup_pcie_rc_delay(char *str)
+{
+	unsigned long delay = 0;
+	unsigned long trio_index;
+	unsigned long mac;
+
+	if (str == NULL || !isdigit(*str))
+		return -EINVAL;
+	trio_index = simple_strtoul(str, (char **)&str, 10);
+	if (trio_index >= TILEGX_NUM_TRIO)
+		return -EINVAL;
+
+	if (*str != ',')
+		return -EINVAL;
+
+	str++;
+	if (!isdigit(*str))
+		return -EINVAL;
+	mac = simple_strtoul(str, (char **)&str, 10);
+	if (mac >= TILEGX_TRIO_PCIES)
+		return -EINVAL;
+
+	if (*str != '\0') {
+		if (*str != ',')
+			return -EINVAL;
+
+		str++;
+		if (!isdigit(*str))
+			return -EINVAL;
+		delay = simple_strtoul(str, (char **)&str, 10);
+		if (delay > MAX_RC_DELAY)
+			return -EINVAL;
+	}
+
+	rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
+	pr_info("Delaying PCIe RC link training for %d sec"
+		" on MAC %lu on TRIO %lu\n", rc_delay[trio_index][mac],
+		mac, trio_index);
+	return 0;
+}
+early_param("pcie_rc_delay", setup_pcie_rc_delay);
+
+/*
+ * Second PCI initialization entry point, called by subsys_initcall.
+ *
+ * The controllers have been set up by the time we get here, by a call to
+ * tile_pci_init.
+ */
+int __init pcibios_init(void)
+{
+	resource_size_t offset;
+	LIST_HEAD(resources);
+	int i;
+
+	if (num_rc_controllers == 0 && num_ep_controllers == 0)
+		return 0;
+
+	pr_info("PCI: Probing PCI hardware\n");
+
+	/*
+	 * We loop over all the TRIO shims and set up the MMIO mappings.
+	 * This step can't be done in tile_pci_init because the MM subsystem
+	 * hasn't been initialized at that point.
+	 */
+	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+		gxio_trio_context_t *context = &trio_contexts[i];
+
+		if (context->fd < 0)
+			continue;
+
+		/*
+		 * Map in the MMIO space for the MAC.
+		 */
+		offset = 0;
+		context->mmio_base_mac =
+			iorpc_ioremap(context->fd, offset,
+				      HV_TRIO_CONFIG_IOREMAP_SIZE);
+		if (context->mmio_base_mac == NULL) {
+			pr_err("PCI: MAC map failure on TRIO %d\n", i);
+
+			hv_dev_close(context->fd);
+			context->fd = -1;
+			continue;
+		}
+	}
+
+	/*
+	 * Delay a bit in case devices aren't ready.  Some devices are
+	 * known to require at least 20ms here, but we use a more
+	 * conservative value.
+	 */
+	msleep(250);
+
+	/* Scan all of the recorded PCI controllers. */
+	for (i = 0; i < num_rc_controllers; i++) {
+		struct pci_controller *controller = &pci_controllers[i];
+		gxio_trio_context_t *trio_context = controller->trio;
+		TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
+		TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
+		TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
+		struct pci_bus *bus;
+		unsigned int reg_offset;
+		unsigned int class_code_revision;
+		int trio_index;
+		int mac;
+		int ret;
+
+		if (trio_context->fd < 0)
+			continue;
+
+		trio_index = controller->trio_index;
+		mac = controller->mac;
+
+		/*
+		 * Check the port strap state, which will override the BIB
+		 * setting.
+		 */
+		reg_offset =
+			(TRIO_PCIE_INTFC_PORT_CONFIG <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+		port_config.word =
+			__gxio_mmio_read(trio_context->mmio_base_mac +
+					 reg_offset);
+
+		if ((port_config.strap_state !=
+		     TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) &&
+		    (port_config.strap_state !=
+		     TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) {
+			/*
+			 * If this is really intended to be an EP port, record
+			 * it so that the endpoint driver will know about it.
+			 */
+			if (port_config.strap_state ==
+			    TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT ||
+			    port_config.strap_state ==
+			    TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1)
+				pcie_ports[trio_index][mac].allow_ep = 1;
+
+			continue;
+		}
+
+		/*
+		 * Delay the RC link training if needed.
+		 */
+		if (rc_delay[trio_index][mac])
+			msleep(rc_delay[trio_index][mac] * 1000);
+
+		ret = gxio_trio_force_rc_link_up(trio_context, mac);
+		if (ret < 0)
+			pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
+			       "MAC %d on TRIO %d\n", mac, trio_index);
+
+		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
+			trio_index, controller->mac);
+
+		/*
+		 * Wait a bit here because some EP devices take longer
+		 * to come up.
+		 */
+		msleep(1000);
+
+		/*
+		 * Check for PCIe link-up status.
+		 */
+		reg_offset =
+			(TRIO_PCIE_INTFC_PORT_STATUS <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+		port_status.word =
+			__gxio_mmio_read(trio_context->mmio_base_mac +
+					 reg_offset);
+		if (!port_status.dl_up) {
+			pr_err("PCI: link is down, MAC %d on TRIO %d\n",
+			       mac, trio_index);
+			continue;
+		}
+
+		/*
+		 * Ensure that the link can come out of L1 power down state.
+		 * Strictly speaking, this is needed only in the case of
+		 * heavy RC-initiated DMAs.
+		 */
+		reg_offset =
+			(TRIO_PCIE_INTFC_TX_FIFO_CTL <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+		tx_fifo_ctl.word =
+			__gxio_mmio_read(trio_context->mmio_base_mac +
+					 reg_offset);
+		tx_fifo_ctl.min_p_credits = 0;
+		__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
+				  tx_fifo_ctl.word);
+
+		/*
+		 * Change the device ID so that Linux bus crawl doesn't confuse
+		 * the internal bridge with any Tilera endpoints.
+		 */
+		reg_offset =
+			(TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
+				    (TILERA_GX36_RC_DEV_ID <<
+				     TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
+				    TILERA_VENDOR_ID);
+
+		/*
+		 * Set the internal P2P bridge class code.
+		 */
+		reg_offset =
+			(TRIO_PCIE_RC_REVISION_ID <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+		class_code_revision =
+			__gxio_mmio_read32(trio_context->mmio_base_mac +
+					   reg_offset);
+		class_code_revision = (class_code_revision & 0xff) |
+			(PCI_CLASS_BRIDGE_PCI << 16);
+
+		__gxio_mmio_write32(trio_context->mmio_base_mac +
+				    reg_offset, class_code_revision);
+
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+
+		/*
+		 * Map in the MMIO space for the PIO region.
+		 */
+		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
+			(((unsigned long long)mac) <<
+			 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
+
+#else
+
+		/*
+		 * Alloc a PIO region for PCI config access per MAC.
+		 */
+		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
+		if (ret < 0) {
+			pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
+			       "on TRIO %d, give up\n", mac, trio_index);
+
+			/* TBD: cleanup ... */
+
+			continue;
+		}
+
+		trio_context->pio_cfg_index[mac] = ret;
+
+		/*
+		 * For PIO CFG, the bus_address_hi parameter is 0.
+		 */
+		ret = gxio_trio_init_pio_region_aux(trio_context,
+			trio_context->pio_cfg_index[mac],
+			mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
+		if (ret < 0) {
+			pr_err("PCI: PCI CFG PIO init failure for mac %d "
+			       "on TRIO %d, give up\n", mac, trio_index);
+
+			/* TBD: cleanup ... */
+
+			continue;
+		}
+
+		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
+			(((unsigned long long)mac) <<
+			 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
+
+#endif
+
+		trio_context->mmio_base_pio_cfg[mac] =
+			iorpc_ioremap(trio_context->fd, offset,
+			(1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
+		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
+			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
+			       mac, trio_index);
+
+			/* TBD: cleanup ... */
+
+			continue;
+		}
+
+		/*
+		 * Initialize the PCIe interrupts.
+		 */
+		if (tile_init_irqs(controller)) {
+			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
+			       mac, trio_index);
+
+			continue;
+		}
+
+		pci_add_resource(&resources, &iomem_resource);
+		bus = pci_scan_root_bus(NULL, 0, controller->ops,
+					controller, &resources);
+		controller->root_bus = bus;
+		controller->last_busno = bus->subordinate;
+	}
+
+	/* Do machine dependent PCI interrupt routing */
+	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
+
+	/*
+	 * This comes from the generic Linux PCI driver.
+	 *
+	 * It allocates all of the resources (I/O memory, etc)
+	 * associated with the devices read in above.
+	 */
+	pci_assign_unassigned_resources();
+
+	/* Record the I/O resources in the PCI controller structure. */
+	for (i = 0; i < num_rc_controllers; i++) {
+		struct pci_controller *controller = &pci_controllers[i];
+		gxio_trio_context_t *trio_context = controller->trio;
+		struct pci_bus *root_bus = pci_controllers[i].root_bus;
+		struct pci_bus *next_bus;
+		uint32_t bus_address_hi;
+		struct pci_dev *dev;
+		int ret;
+		int j;
+
+		/*
+		 * Skip controllers that are not properly initialized or
+		 * have down links.
+		 */
+		if (root_bus == NULL)
+			continue;
+
+		/* Configure the max_payload_size values for this domain. */
+		fixup_read_and_payload_sizes(controller);
+
+		list_for_each_entry(dev, &root_bus->devices, bus_list) {
+			/* Find the PCI host controller, ie. the 1st bridge. */
+			if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+			    (PCI_SLOT(dev->devfn) == 0)) {
+				next_bus = dev->subordinate;
+				pci_controllers[i].mem_resources[0] =
+					*next_bus->resource[0];
+				pci_controllers[i].mem_resources[1] =
+					*next_bus->resource[1];
+				pci_controllers[i].mem_resources[2] =
+					*next_bus->resource[2];
+
+				break;
+			}
+		}
+
+		if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM)
+			bus_address_hi =
+				pci_controllers[i].mem_resources[1].start >> 32;
+		else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH)
+			bus_address_hi =
+				pci_controllers[i].mem_resources[2].start >> 32;
+		else {
+			/* This is unlikely. */
+			pr_err("PCI: no memory resources on TRIO %d mac %d\n",
+			       controller->trio_index, controller->mac);
+			continue;
+		}
+
+		/*
+		 * We always assign 32-bit PCI bus BAR ranges.
+		 */
+		BUG_ON(bus_address_hi != 0);
+
+		/*
+		 * Alloc a PIO region for PCI memory access for each RC port.
+		 */
+		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
+		if (ret < 0) {
+			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
+			       "give up\n", controller->trio_index,
+			       controller->mac);
+
+			/* TBD: cleanup ... */
+
+			continue;
+		}
+
+		controller->pio_mem_index = ret;
+
+		/*
+		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
+		 * because we always assign 32-bit PCI bus BAR ranges.
+		 */
+		ret = gxio_trio_init_pio_region_aux(trio_context,
+						    controller->pio_mem_index,
+						    controller->mac,
+						    bus_address_hi,
+						    0);
+		if (ret < 0) {
+			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
+			       "give up\n", controller->trio_index,
+			       controller->mac);
+
+			/* TBD: cleanup ... */
+
+			continue;
+		}
+
+		/*
+		 * Configure a Mem-Map region for each memory controller so
+		 * that Linux can map all of its PA space to the PCI bus.
+		 * Use the IOMMU to handle hash-for-home memory.
+		 */
+		for_each_online_node(j) {
+			unsigned long start_pfn = node_start_pfn[j];
+			unsigned long end_pfn = node_end_pfn[j];
+			unsigned long nr_pages = end_pfn - start_pfn;
+
+			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
+							  0);
+			if (ret < 0) {
+				pr_err("PCI: Mem-Map alloc failure on TRIO %d "
+				       "mac %d for MC %d, give up\n",
+				       controller->trio_index,
+				       controller->mac, j);
+
+				/* TBD: cleanup ... */
+
+				goto alloc_mem_map_failed;
+			}
+
+			controller->mem_maps[j] = ret;
+
+			/*
+			 * Initialize the Mem-Map and the I/O MMU so that all
+			 * the physical memory can be accessed by the endpoint
+			 * devices.  The base bus address is set to the base CPA
+			 * of this memory controller, so is the base VA.  The
+			 * I/O MMU table essentially translates the CPA to
+			 * the real PA.
+			 */
+			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
+				controller->mem_maps[j],
+				start_pfn << PAGE_SHIFT,
+				nr_pages << PAGE_SHIFT,
+				trio_context->asid,
+				controller->mac,
+				start_pfn << PAGE_SHIFT,
+				j,
+				GXIO_TRIO_ORDER_MODE_UNORDERED);
+			if (ret < 0) {
+				pr_err("PCI: Mem-Map init failure on TRIO %d "
+				       "mac %d for MC %d, give up\n",
+				       controller->trio_index,
+				       controller->mac, j);
+
+				/* TBD: cleanup ... */
+
+				goto alloc_mem_map_failed;
+			}
+
+			continue;
+
+alloc_mem_map_failed:
+			break;
+		}
+	}
+
+	return 0;
+}
+subsys_initcall(pcibios_init);
+
+/*
+ * No bus fixups needed.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+	/* Nothing needs to be done. */
+}
+
+/*
+ * This can be called from the generic PCI layer, but doesn't need to
+ * do anything.
+ */
+char __devinit *pcibios_setup(char *str)
+{
+	if (!strcmp(str, "off")) {
+		pci_probe = 0;
+		return NULL;
+	}
+	return str;
+}
+
+/*
+ * This is called from the generic Linux layer.
+ */
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * Enable memory address decoding, as appropriate, for the
+ * device described by the 'dev' struct.  The I/O decoding
+ * is disabled, though the TILE-Gx supports I/O addressing.
+ *
+ * This is called from the generic PCI layer, and can be called
+ * for bridges or endpoints.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	return pci_enable_resources(dev, mask);
+}
+
+/* Map a PCI MMIO bus address into VA space. */
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
+{
+	struct pci_controller *controller = NULL;
+	resource_size_t bar_start;
+	resource_size_t bar_end;
+	resource_size_t offset;
+	resource_size_t start;
+	resource_size_t end;
+	int trio_fd;
+	int i, j;
+
+	start = phys_addr;
+	end = phys_addr + size - 1;
+
+	/*
+	 * In the following, each PCI controller's mem_resources[1]
+	 * represents its (non-prefetchable) PCI memory resource and
+	 * mem_resources[2] refers to its prefetchable PCI memory resource.
+	 * By searching phys_addr in each controller's mem_resources[], we can
+	 * determine the controller that should accept the PCI memory access.
+	 */
+	for (i = 0; i < num_rc_controllers; i++) {
+		/*
+		 * Skip controllers that are not properly initialized or
+		 * have down links.
+		 */
+		if (pci_controllers[i].root_bus == NULL)
+			continue;
+
+		for (j = 1; j < 3; j++) {
+			bar_start =
+				pci_controllers[i].mem_resources[j].start;
+			bar_end =
+				pci_controllers[i].mem_resources[j].end;
+
+			if ((start >= bar_start) && (end <= bar_end)) {
+				controller = &pci_controllers[i];
+				goto got_it;
+			}
+		}
+	}
+
+	if (controller == NULL)
+		return NULL;
+
+got_it:
+	trio_fd = controller->trio->fd;
+
+	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + phys_addr;
+
+	/*
+	 * We need to keep the PCI bus address's in-page offset in the VA.
+	 */
+	return iorpc_ioremap(trio_fd, offset, size) +
+		(phys_addr & (PAGE_SIZE - 1));
+}
+EXPORT_SYMBOL(ioremap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+	iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+/****************************************************************
+ *
+ * Tile PCI config space read/write routines
+ *
+ ****************************************************************/
+
+/*
+ * These are the normal read and write ops; they are expanded with
+ * macros from pci_bus_read_config_byte() etc.
+ *
+ * devfn is the combined PCI device & function.
+ *
+ * offset is in bytes, from the start of config space for the
+ * specified bus & device.
+ */
+static int __devinit tile_cfg_read(struct pci_bus *bus,
+				   unsigned int devfn,
+				   int offset,
+				   int size,
+				   u32 *val)
+{
+	struct pci_controller *controller = bus->sysdata;
+	gxio_trio_context_t *trio_context = controller->trio;
+	int busnum = bus->number & 0xff;
+	int device = PCI_SLOT(devfn);
+	int function = PCI_FUNC(devfn);
+	int config_type = 1;
+	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
+	void *mmio_addr;
+
+	/*
+	 * Map all accesses to the local device (bus == 0) into the
+	 * MMIO space of the MAC.  Accesses to the downstream devices
+	 * go to the PIO space.
+	 */
+	if (busnum == 0) {
+		if (device == 0) {
+			/*
+			 * This is the internal downstream P2P bridge,
+			 * access directly.
+			 */
+			unsigned int reg_offset;
+
+			reg_offset = ((offset & 0xFFF) <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
+				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+				(controller->mac <<
+					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+			mmio_addr = trio_context->mmio_base_mac + reg_offset;
+
+			goto valid_device;
+
+		} else {
+			/*
+			 * We fake an empty device for (device > 0),
+			 * since there is only one device on bus 0.
+			 */
+			goto invalid_device;
+		}
+	}
+
+	/*
+	 * Accesses to the directly attached device (bus == 1) have to be
+	 * sent as type-0 configs.
+	 */
+	if (busnum == 1) {
+		/*
+		 * There is only one device off of our built-in P2P bridge.
+		 */
+		if (device != 0)
+			goto invalid_device;
+
+		config_type = 0;
+	}
+
+	cfg_addr.word = 0;
+	cfg_addr.reg_addr = (offset & 0xFFF);
+	cfg_addr.fn = function;
+	cfg_addr.dev = device;
+	cfg_addr.bus = busnum;
+	cfg_addr.type = config_type;
+
+	/*
+	 * Note that we don't set the mac field in cfg_addr because the
+	 * mapping is per port.
+	 */
+	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
+		cfg_addr.word;
+
+valid_device:
+
+	switch (size) {
+	case 4:
+		*val = __gxio_mmio_read32(mmio_addr);
+		break;
+
+	case 2:
+		*val = __gxio_mmio_read16(mmio_addr);
+		break;
+
+	case 1:
+		*val = __gxio_mmio_read8(mmio_addr);
+		break;
+
+	default:
+		return PCIBIOS_FUNC_NOT_SUPPORTED;
+	}
+
+	TRACE_CFG_RD(size, *val, busnum, device, function, offset);
+
+	return 0;
+
+invalid_device:
+
+	switch (size) {
+	case 4:
+		*val = 0xFFFFFFFF;
+		break;
+
+	case 2:
+		*val = 0xFFFF;
+		break;
+
+	case 1:
+		*val = 0xFF;
+		break;
+
+	default:
+		return PCIBIOS_FUNC_NOT_SUPPORTED;
+	}
+
+	return 0;
+}
+
+
+/*
+ * See tile_cfg_read() for relevant comments.
+ * Note that "val" is the value to write, not a pointer to that value.
+ */
+static int __devinit tile_cfg_write(struct pci_bus *bus,
+				    unsigned int devfn,
+				    int offset,
+				    int size,
+				    u32 val)
+{
+	struct pci_controller *controller = bus->sysdata;
+	gxio_trio_context_t *trio_context = controller->trio;
+	int busnum = bus->number & 0xff;
+	int device = PCI_SLOT(devfn);
+	int function = PCI_FUNC(devfn);
+	int config_type = 1;
+	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
+	void *mmio_addr;
+	u32 val_32 = (u32)val;
+	u16 val_16 = (u16)val;
+	u8 val_8 = (u8)val;
+
+	/*
+	 * Map all accesses to the local device (bus == 0) into the
+	 * MMIO space of the MAC.  Accesses to the downstream devices
+	 * go to the PIO space.
+	 */
+	if (busnum == 0) {
+		if (device == 0) {
+			/*
+			 * This is the internal downstream P2P bridge,
+			 * access directly.
+			 */
+			unsigned int reg_offset;
+
+			reg_offset = ((offset & 0xFFF) <<
+				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
+				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+				(controller->mac <<
+					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+			mmio_addr = trio_context->mmio_base_mac + reg_offset;
+
+			goto valid_device;
+
+		} else {
+			/*
+			 * We fake an empty device for (device > 0),
+			 * since there is only one device on bus 0.
+			 */
+			goto invalid_device;
+		}
+	}
+
+	/*
+	 * Accesses to the directly attached device (bus == 1) have to be
+	 * sent as type-0 configs.
+	 */
+	if (busnum == 1) {
+		/*
+		 * There is only one device off of our built-in P2P bridge.
+		 */
+		if (device != 0)
+			goto invalid_device;
+
+		config_type = 0;
+	}
+
+	cfg_addr.word = 0;
+	cfg_addr.reg_addr = (offset & 0xFFF);
+	cfg_addr.fn = function;
+	cfg_addr.dev = device;
+	cfg_addr.bus = busnum;
+	cfg_addr.type = config_type;
+
+	/*
+	 * Note that we don't set the mac field in cfg_addr because the
+	 * mapping is per port.
+	 */
+	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
+		cfg_addr.word;
+
+valid_device:
+
+	switch (size) {
+	case 4:
+		__gxio_mmio_write32(mmio_addr, val_32);
+		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
+		break;
+
+	case 2:
+		__gxio_mmio_write16(mmio_addr, val_16);
+		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
+		break;
+
+	case 1:
+		__gxio_mmio_write8(mmio_addr, val_8);
+		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
+		break;
+
+	default:
+		return PCIBIOS_FUNC_NOT_SUPPORTED;
+	}
+
+invalid_device:
+
+	return 0;
+}
+
+
+static struct pci_ops tile_cfg_ops = {
+	.read =		tile_cfg_read,
+	.write =	tile_cfg_write,
+};
+
+
+/*
+ * MSI support starts here.
+ */
+static unsigned int
+tilegx_msi_startup(struct irq_data *d)
+{
+	if (d->msi_desc)
+		unmask_msi_irq(d);
+
+	return 0;
+}
+
+static void
+tilegx_msi_ack(struct irq_data *d)
+{
+	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_msi_mask(struct irq_data *d)
+{
+	mask_msi_irq(d);
+	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_msi_unmask(struct irq_data *d)
+{
+	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
+	unmask_msi_irq(d);
+}
+
+static struct irq_chip tilegx_msi_chip = {
+	.name			= "tilegx_msi",
+	.irq_startup		= tilegx_msi_startup,
+	.irq_ack		= tilegx_msi_ack,
+	.irq_mask		= tilegx_msi_mask,
+	.irq_unmask		= tilegx_msi_unmask,
+
+	/* TBD: support set_affinity. */
+};
+
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+	struct pci_controller *controller;
+	gxio_trio_context_t *trio_context;
+	struct msi_msg msg;
+	int default_irq;
+	uint64_t mem_map_base;
+	uint64_t mem_map_limit;
+	u64 msi_addr;
+	int mem_map;
+	int cpu;
+	int irq;
+	int ret;
+
+	irq = create_irq();
+	if (irq < 0)
+		return irq;
+
+	/*
+	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
+	 * devices that are not capable of generating a 64-bit message address.
+	 * These devices will fall back to using the legacy interrupts.
+	 * Most PCIe endpoint devices do support 64-bit message addressing.
+	 */
+	if (desc->msi_attrib.is_64 == 0) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "64-bit MSI message address not supported, "
+			   "falling back to legacy interrupts.\n");
+
+		ret = -ENOMEM;
+		goto is_64_failure;
+	}
+
+	default_irq = desc->msi_attrib.default_irq;
+	controller = irq_get_handler_data(default_irq);
+
+	BUG_ON(!controller);
+
+	trio_context = controller->trio;
+
+	/*
+	 * Allocate the Mem-Map that will accept the MSI write and
+	 * trigger the TILE-side interrupts.
+	 */
+	mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
+	if (mem_map < 0) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "%s Mem-Map alloc failure. "
+			   "Failed to initialize MSI interrupts. "
+			   "Falling back to legacy interrupts.\n",
+			   desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+
+		ret = -ENOMEM;
+		goto msi_mem_map_alloc_failure;
+	}
+
+	/* We try to distribute different IRQs to different tiles. */
+	cpu = tile_irq_cpu(irq);
+
+	/*
+	 * Now call up to the HV to configure the Mem-Map interrupt and
+	 * set up the IPI binding.
+	 */
+	mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
+		mem_map * MEM_MAP_INTR_REGION_SIZE;
+	mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
+
+	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
+					KERNEL_PL, irq, controller->mac,
+					mem_map, mem_map_base, mem_map_limit,
+					trio_context->asid);
+	if (ret < 0) {
+		dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
+
+		goto hv_msi_config_failure;
+	}
+
+	irq_set_msi_desc(irq, desc);
+
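+	/*
+	 * Aim the MSI write at the INT3 doorbell: the region base plus
+	 * the offset of REG_INT3 relative to REG_INT0.
+	 */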
+	msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0;
+
+	msg.address_hi = msi_addr >> 32;
+	msg.address_lo = msi_addr & 0xffffffff;
+
+	msg.data = mem_map;
+
+	write_msi_msg(irq, &msg);
+	irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
+	irq_set_handler_data(irq, controller);
+
+	return 0;
+
+hv_msi_config_failure:
+	/* Free mem-map */
+msi_mem_map_alloc_failure:
+is_64_failure:
+	destroy_irq(irq);
+	return ret;
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+	destroy_irq(irq);
+}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index dd87f342039..6d179dfcc15 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1344,6 +1344,7 @@ void __init setup_arch(char **cmdline_p)
 
 
 #ifdef CONFIG_PCI
+#if !defined (__tilegx__)
 	/*
 	 * Initialize the PCI structures.  This is done before memory
 	 * setup so that we know whether or not a pci_reserve region
@@ -1351,6 +1352,7 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	if (tile_pci_init() == 0)
 		pci_reserve_mb = 0;
+#endif
 
 	/* PCI systems reserve a region just below 4GB for mapping iomem. */
 	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
@@ -1379,6 +1381,10 @@ void __init setup_arch(char **cmdline_p)
 	setup_cpu(1);
 	setup_clock();
 	load_hv_initrd();
+
+#if defined(CONFIG_PCI) && defined (__tilegx__)
+	tile_pci_init();
+#endif
 }
 
 
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 345edfed9fc..de0de0c0e8a 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -575,13 +575,6 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
 }
 EXPORT_SYMBOL(ioremap_prot);
 
-/* Map a PCI MMIO bus address into VA space. */
-void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
-{
-	panic("ioremap for PCI MMIO is not supported");
-}
-EXPORT_SYMBOL(ioremap);
-
 /* Unmap an MMIO VA mapping. */
 void iounmap(volatile void __iomem *addr_in)
 {