aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/pci/pci-octeon.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/mips/pci/pci-octeon.c')
-rw-r--r--arch/mips/pci/pci-octeon.c675
1 files changed, 675 insertions, 0 deletions
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
new file mode 100644
index 000000000000..9cb0c807f564
--- /dev/null
+++ b/arch/mips/pci/pci-octeon.c
@@ -0,0 +1,675 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2005-2009 Cavium Networks
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/pci.h>
11#include <linux/interrupt.h>
12#include <linux/time.h>
13#include <linux/delay.h>
14
15#include <asm/time.h>
16
17#include <asm/octeon/octeon.h>
18#include <asm/octeon/cvmx-npi-defs.h>
19#include <asm/octeon/cvmx-pci-defs.h>
20#include <asm/octeon/pci-octeon.h>
21
22#define USE_OCTEON_INTERNAL_ARBITER
23
24/*
25 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
26 * addresses. Use PCI endian swapping 1 so no address swapping is
27 * necessary. The Linux io routines will endian swap the data.
28 */
29#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
30#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
31
/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
33#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
34
/**
 * This is the bit decoding used for the Octeon PCI controller addresses.
 * The config-space accessors below fill this in; see the comments at the
 * top of this file for the did/subdid encodings of IO and MEM space.
 */
union octeon_pci_address {
	uint64_t u64;
	struct {
		uint64_t upper:2;	/* Address space qualifier; set to 2 by accessors */
		uint64_t reserved:13;
		uint64_t io:1;		/* Set to 1 for the config cycles issued below */
		uint64_t did:5;		/* Device ID; 3 selects PCI (see top of file) */
		uint64_t subdid:3;	/* Sub device ID; 1 used for config space */
		uint64_t reserved2:4;
		uint64_t endian_swap:2;	/* Endian swap mode; 1 used by accessors */
		uint64_t reserved3:10;
		uint64_t bus:8;		/* PCI bus number */
		uint64_t dev:5;		/* PCI device (slot) number */
		uint64_t func:3;	/* PCI function number */
		uint64_t reg:8;		/* Config register offset */
	} s;
};
55
/*
 * Hook used by pcibios_map_irq() to do the actual device-to-IRQ mapping.
 * Installed by octeon_pci_setup() below; __initdata because IRQ mapping
 * only happens during boot-time bus enumeration.
 */
int __initdata (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
					 u8 slot, u8 pin);
/* DMA BAR layout in use; selected in octeon_pci_setup() based on chip model. */
enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
59
60/**
61 * Map a PCI device to the appropriate interrupt line
62 *
63 * @dev: The Linux PCI device structure for the device to map
64 * @slot: The slot number for this device on __BUS 0__. Linux
65 * enumerates through all the bridges and figures out the
66 * slot on Bus 0 where this device eventually hooks to.
67 * @pin: The PCI interrupt pin read from the device, then swizzled
68 * as it goes through each bridge.
69 * Returns Interrupt number for the device
70 */
71int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
72{
73 if (octeon_pcibios_map_irq)
74 return octeon_pcibios_map_irq(dev, slot, pin);
75 else
76 panic("octeon_pcibios_map_irq not set.");
77}
78
79
80/*
81 * Called to perform platform specific PCI setup
82 */
83int pcibios_plat_dev_init(struct pci_dev *dev)
84{
85 uint16_t config;
86 uint32_t dconfig;
87 int pos;
88 /*
89 * Force the Cache line setting to 64 bytes. The standard
90 * Linux bus scan doesn't seem to set it. Octeon really has
91 * 128 byte lines, but Intel bridges get really upset if you
92 * try and set values above 64 bytes. Value is specified in
93 * 32bit words.
94 */
95 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
96 /* Set latency timers for all devices */
97 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48);
98
99 /* Enable reporting System errors and parity errors on all devices */
100 /* Enable parity checking and error reporting */
101 pci_read_config_word(dev, PCI_COMMAND, &config);
102 config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
103 pci_write_config_word(dev, PCI_COMMAND, config);
104
105 if (dev->subordinate) {
106 /* Set latency timers on sub bridges */
107 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48);
108 /* More bridge error detection */
109 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
110 config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
111 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
112 }
113
114 /* Enable the PCIe normal error reporting */
115 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
116 if (pos) {
117 /* Update Device Control */
118 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
119 /* Correctable Error Reporting */
120 config |= PCI_EXP_DEVCTL_CERE;
121 /* Non-Fatal Error Reporting */
122 config |= PCI_EXP_DEVCTL_NFERE;
123 /* Fatal Error Reporting */
124 config |= PCI_EXP_DEVCTL_FERE;
125 /* Unsupported Request */
126 config |= PCI_EXP_DEVCTL_URRE;
127 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
128 }
129
130 /* Find the Advanced Error Reporting capability */
131 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
132 if (pos) {
133 /* Clear Uncorrectable Error Status */
134 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
135 &dconfig);
136 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
137 dconfig);
138 /* Enable reporting of all uncorrectable errors */
139 /* Uncorrectable Error Mask - turned on bits disable errors */
140 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
141 /*
142 * Leave severity at HW default. This only controls if
143 * errors are reported as uncorrectable or
144 * correctable, not if the error is reported.
145 */
146 /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
147 /* Clear Correctable Error Status */
148 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
149 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
150 /* Enable reporting of all correctable errors */
151 /* Correctable Error Mask - turned on bits disable errors */
152 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
153 /* Advanced Error Capabilities */
154 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
155 /* ECRC Generation Enable */
156 if (config & PCI_ERR_CAP_ECRC_GENC)
157 config |= PCI_ERR_CAP_ECRC_GENE;
158 /* ECRC Check Enable */
159 if (config & PCI_ERR_CAP_ECRC_CHKC)
160 config |= PCI_ERR_CAP_ECRC_CHKE;
161 pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
162 /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
163 /* Report all errors to the root complex */
164 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
165 PCI_ERR_ROOT_CMD_COR_EN |
166 PCI_ERR_ROOT_CMD_NONFATAL_EN |
167 PCI_ERR_ROOT_CMD_FATAL_EN);
168 /* Clear the Root status register */
169 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
170 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
171 }
172
173 return 0;
174}
175
176/**
177 * Return the mapping of PCI device number to IRQ line. Each
178 * character in the return string represents the interrupt
179 * line for the device at that position. Device 1 maps to the
180 * first character, etc. The characters A-D are used for PCI
181 * interrupts.
182 *
183 * Returns PCI interrupt mapping
184 */
185const char *octeon_get_pci_interrupts(void)
186{
187 /*
188 * Returning an empty string causes the interrupts to be
189 * routed based on the PCI specification. From the PCI spec:
190 *
191 * INTA# of Device Number 0 is connected to IRQW on the system
192 * board. (Device Number has no significance regarding being
193 * located on the system board or in a connector.) INTA# of
194 * Device Number 1 is connected to IRQX on the system
195 * board. INTA# of Device Number 2 is connected to IRQY on the
196 * system board. INTA# of Device Number 3 is connected to IRQZ
197 * on the system board. The table below describes how each
198 * agent's INTx# lines are connected to the system board
199 * interrupt lines. The following equation can be used to
200 * determine to which INTx# signal on the system board a given
201 * device's INTx# line(s) is connected.
202 *
203 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
204 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
205 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
206 * INTD# = 3)
207 */
208 switch (octeon_bootinfo->board_type) {
209 case CVMX_BOARD_TYPE_NAO38:
210 /* This is really the NAC38 */
211 return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
212 case CVMX_BOARD_TYPE_THUNDER:
213 return "";
214 case CVMX_BOARD_TYPE_EBH3000:
215 return "";
216 case CVMX_BOARD_TYPE_EBH3100:
217 case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
218 case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
219 return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
220 case CVMX_BOARD_TYPE_BBGW_REF:
221 return "AABCD";
222 default:
223 return "";
224 }
225}
226
227/**
228 * Map a PCI device to the appropriate interrupt line
229 *
230 * @dev: The Linux PCI device structure for the device to map
231 * @slot: The slot number for this device on __BUS 0__. Linux
232 * enumerates through all the bridges and figures out the
233 * slot on Bus 0 where this device eventually hooks to.
234 * @pin: The PCI interrupt pin read from the device, then swizzled
235 * as it goes through each bridge.
236 * Returns Interrupt number for the device
237 */
238int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
239 u8 slot, u8 pin)
240{
241 int irq_num;
242 const char *interrupts;
243 int dev_num;
244
245 /* Get the board specific interrupt mapping */
246 interrupts = octeon_get_pci_interrupts();
247
248 dev_num = dev->devfn >> 3;
249 if (dev_num < strlen(interrupts))
250 irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
251 OCTEON_IRQ_PCI_INT0;
252 else
253 irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
254 return irq_num;
255}
256
257
258/*
259 * Read a value from configuration space
260 */
261static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
262 int reg, int size, u32 *val)
263{
264 union octeon_pci_address pci_addr;
265
266 pci_addr.u64 = 0;
267 pci_addr.s.upper = 2;
268 pci_addr.s.io = 1;
269 pci_addr.s.did = 3;
270 pci_addr.s.subdid = 1;
271 pci_addr.s.endian_swap = 1;
272 pci_addr.s.bus = bus->number;
273 pci_addr.s.dev = devfn >> 3;
274 pci_addr.s.func = devfn & 0x7;
275 pci_addr.s.reg = reg;
276
277#if PCI_CONFIG_SPACE_DELAY
278 udelay(PCI_CONFIG_SPACE_DELAY);
279#endif
280 switch (size) {
281 case 4:
282 *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
283 return PCIBIOS_SUCCESSFUL;
284 case 2:
285 *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
286 return PCIBIOS_SUCCESSFUL;
287 case 1:
288 *val = cvmx_read64_uint8(pci_addr.u64);
289 return PCIBIOS_SUCCESSFUL;
290 }
291 return PCIBIOS_FUNC_NOT_SUPPORTED;
292}
293
294
295/*
296 * Write a value to PCI configuration space
297 */
298static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
299 int reg, int size, u32 val)
300{
301 union octeon_pci_address pci_addr;
302
303 pci_addr.u64 = 0;
304 pci_addr.s.upper = 2;
305 pci_addr.s.io = 1;
306 pci_addr.s.did = 3;
307 pci_addr.s.subdid = 1;
308 pci_addr.s.endian_swap = 1;
309 pci_addr.s.bus = bus->number;
310 pci_addr.s.dev = devfn >> 3;
311 pci_addr.s.func = devfn & 0x7;
312 pci_addr.s.reg = reg;
313
314#if PCI_CONFIG_SPACE_DELAY
315 udelay(PCI_CONFIG_SPACE_DELAY);
316#endif
317 switch (size) {
318 case 4:
319 cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
320 return PCIBIOS_SUCCESSFUL;
321 case 2:
322 cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
323 return PCIBIOS_SUCCESSFUL;
324 case 1:
325 cvmx_write64_uint8(pci_addr.u64, val);
326 return PCIBIOS_SUCCESSFUL;
327 }
328 return PCIBIOS_FUNC_NOT_SUPPORTED;
329}
330
331
332static struct pci_ops octeon_pci_ops = {
333 octeon_read_config,
334 octeon_write_config,
335};
336
337static struct resource octeon_pci_mem_resource = {
338 .start = 0,
339 .end = 0,
340 .name = "Octeon PCI MEM",
341 .flags = IORESOURCE_MEM,
342};
343
/*
 * PCI I/O ports must start above 16KB so they do not collide with the
 * ISA bus filtering in the PCI-X to PCI bridge.
 */
348static struct resource octeon_pci_io_resource = {
349 .start = 0x4000,
350 .end = OCTEON_PCI_IOSPACE_SIZE - 1,
351 .name = "Octeon PCI IO",
352 .flags = IORESOURCE_IO,
353};
354
/* Root controller descriptor registered with the MIPS PCI core in
 * octeon_pci_setup(). */
static struct pci_controller octeon_pci_controller = {
	.pci_ops = &octeon_pci_ops,
	.mem_resource = &octeon_pci_mem_resource,
	.mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
	.io_resource = &octeon_pci_io_resource,
	.io_offset = 0,
	.io_map_base = OCTEON_PCI_IOSPACE_BASE,
};
363
364
/*
 * Low level initialize the Octeon PCI controller: pulse PCI reset,
 * program the NPI/PCI control registers, size the BARs, and set up the
 * PCI/PCI-X configuration registers.  The register writes below are
 * strictly ordered with delays between them; do not reorder.
 */
static void octeon_pci_initialize(void)
{
	union cvmx_pci_cfg01 cfg01;
	union cvmx_npi_ctl_status ctl_status;
	union cvmx_pci_ctl_status_2 ctl_status_2;
	union cvmx_pci_cfg19 cfg19;
	union cvmx_pci_cfg16 cfg16;
	union cvmx_pci_cfg22 cfg22;
	union cvmx_pci_cfg56 cfg56;

	/* Reset the PCI Bus */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
	cvmx_read_csr(CVMX_CIU_SOFT_PRST);	/* read back to flush the write */

	udelay(2000);		/* Hold PCI reset for 2 ms */

	ctl_status.u64 = 0;	/* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
	ctl_status.s.max_word = 1;
	ctl_status.s.timer = 1;
	cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);

	/* Deassert PCI reset and advertise PCX Host Mode Device Capability
	   (64b) */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
	cvmx_read_csr(CVMX_CIU_SOFT_PRST);

	udelay(2000);		/* Wait 2 ms after deasserting PCI reset */

	ctl_status_2.u32 = 0;
	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0. Must be set
					   before any PCI reads. */
	ctl_status_2.s.bar2pres = 1;	/* Enable BAR2 */
	ctl_status_2.s.bar2_enb = 1;
	ctl_status_2.s.bar2_cax = 1;	/* Don't use L2 */
	ctl_status_2.s.bar2_esx = 1;
	ctl_status_2.s.pmo_amod = 1;	/* Round robin priority */
	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
		/* BAR1 hole */
		ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
		ctl_status_2.s.bb1_siz = 1;	/* BAR1 is 2GB */
		ctl_status_2.s.bb_ca = 1;	/* Don't use L2 with big bars */
		ctl_status_2.s.bb_es = 1;	/* Big bar in byte swap mode */
		ctl_status_2.s.bb1 = 1;		/* BAR1 is big */
		ctl_status_2.s.bb0 = 1;		/* BAR0 is big */
	}

	octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
	udelay(2000);		/* Wait 2 ms before doing PCI reads */

	/* Read back to learn whether the bus came up as PCI or PCI-X. */
	ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
	pr_notice("PCI Status: %s %s-bit\n",
		  ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
		  ctl_status_2.s.ap_64ad ? "64" : "32");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		/* Measure the PCI clock by counting PCI cycles over ~1 ms
		   of core cycles. */
		union cvmx_pci_cnt_reg cnt_reg_start;
		union cvmx_pci_cnt_reg cnt_reg_end;
		unsigned long cycles, pci_clock;

		cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
		cycles = read_c0_cvmcount();
		udelay(1000);
		cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
		cycles = read_c0_cvmcount() - cycles;
		pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
			    (cycles / (mips_hpt_frequency / 1000000));
		pr_notice("PCI Clock: %lu MHz\n", pci_clock);
	}

	/*
	 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
	 * in PCI-X mode to allow four outstanding splits. Otherwise,
	 * should not change from its reset value. Don't write PCI_CFG19
	 * in PCI mode (0x82000001 reset value), write it to 0x82000004
	 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
	 * MRBCM -> must be one.
	 */
	if (ctl_status_2.s.ap_pcix) {
		cfg19.u32 = 0;
		/*
		 * Target Delayed/Split request outstanding maximum
		 * count. [1..31] and 0=32. NOTE: If the user
		 * programs these bits beyond the Designed Maximum
		 * outstanding count, then the designed maximum table
		 * depth will be used instead. No additional
		 * Deferred/Split transactions will be accepted if
		 * this outstanding maximum count is
		 * reached. Furthermore, no additional deferred/split
		 * transactions will be accepted if the I/O delay/ I/O
		 * Split Request outstanding maximum is reached.
		 */
		cfg19.s.tdomc = 4;
		/*
		 * Master Deferred Read Request Outstanding Max Count
		 * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
		 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
		 * 5 2 110 6 3 111 7 3 For example, if these bits are
		 * programmed to 100, the core can support 2 DAC
		 * cycles, 4 SAC cycles or a combination of 1 DAC and
		 * 2 SAC cycles. NOTE: For the PCI-X maximum
		 * outstanding split transactions, refer to
		 * CRE0[22:20].
		 */
		cfg19.s.mdrrmc = 2;
		/*
		 * Master Request (Memory Read) Byte Count/Byte Enable
		 * select. 0 = Byte Enables valid. In PCI mode, a
		 * burst transaction cannot be performed using Memory
		 * Read command=4?h6. 1 = DWORD Byte Count valid
		 * (default). In PCI Mode, the memory read byte
		 * enables are automatically generated by the
		 * core. Note: N3 Master Request transaction sizes are
		 * always determined through the
		 * am_attr[<35:32>|<7:0>] field.
		 */
		cfg19.s.mrbcm = 1;
		octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
	}


	cfg01.u32 = 0;
	cfg01.s.msae = 1;	/* Memory Space Access Enable */
	cfg01.s.me = 1;		/* Master Enable */
	cfg01.s.pee = 1;	/* PERR# Enable */
	cfg01.s.see = 1;	/* System Error Enable */
	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */

	octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);

#ifdef USE_OCTEON_INTERNAL_ARBITER
	/*
	 * When OCTEON is a PCI host, most systems will use OCTEON's
	 * internal arbiter, so must enable it before any PCI/PCI-X
	 * traffic can occur.
	 */
	{
		union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;

		pci_int_arb_cfg.u64 = 0;
		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
		cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
	}
#endif	/* USE_OCTEON_INTERNAL_ARBITER */

	/*
	 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
	 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
	 * 1..7.
	 */
	cfg16.u32 = 0;
	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
	octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);

	/*
	 * Should be written to 0x4ff00. MTTV -> must be zero.
	 * FLUSH -> must be 1. MRV -> should be 0xFF.
	 */
	cfg22.u32 = 0;
	/* Master Retry Value [1..255] and 0=infinite */
	cfg22.s.mrv = 0xff;
	/*
	 * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
	 * N3K operation.
	 */
	cfg22.s.flush = 1;
	octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);

	/*
	 * MOST Indicates the maximum number of outstanding splits (in -1
	 * notation) when OCTEON is in PCI-X mode. PCI-X performance is
	 * affected by the MOST selection. Should generally be written
	 * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
	 * depending on the desired MOST of 3, 2, 1, or 0, respectively.
	 */
	cfg56.u32 = 0;
	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count
				   [0=512B,1=1024B,2=2048B,3=4096B] */
	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1
				   .. 7=32] */

	octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);

	/*
	 * Affects PCI performance when OCTEON services reads to its
	 * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
	 * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
	 * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
	 * these values need to be changed so they won't possibly prefetch off
	 * of the end of memory if PCI is DMAing a buffer at the end of
	 * memory. Note that these values differ from their reset values.
	 */
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
}
567
568
/*
 * Initialize the Octeon PCI controller.  Runs at arch_initcall time;
 * selects the DMA BAR layout for the chip model, sets up the I/O port
 * window, brings up the controller (host mode only), programs the BAR
 * mappings, and registers the controller with the MIPS PCI core.
 * Returns 0 in all cases (including the no-PCI / non-host early-outs).
 */
static int __init octeon_pci_setup(void)
{
	union cvmx_npi_mem_access_subidx mem_access;
	int index;

	/* Only these chips have PCI */
	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
		return 0;

	/* Point pcibios_map_irq() to the PCI version of it */
	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;

	/* Only use the big bars on chips that support it */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
	else
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;

	/* PCI I/O and PCI MEM values */
	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
	ioport_resource.start = 0;
	ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
	if (!octeon_is_pci_host()) {
		pr_notice("Not in host mode, PCI Controller not initialized\n");
		return 0;
	}

	pr_notice("%s Octeon big bar support\n",
		  (octeon_dma_bar_type ==
		  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");

	octeon_pci_initialize();

	/* Configure endian swapping for PCI memory accesses (subid 3). */
	mem_access.u64 = 0;
	mem_access.s.esr = 1;	/* Endian-Swap on read. */
	mem_access.s.esw = 1;	/* Endian-Swap on write. */
	mem_access.s.nsr = 0;	/* No-Snoop on read. */
	mem_access.s.nsw = 0;	/* No-Snoop on write. */
	mem_access.s.ror = 0;	/* Relax Read on read. */
	mem_access.s.row = 0;	/* Relax Order on write. */
	mem_access.s.ba = 0;	/* PCI Address bits [63:36]. */
	cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);

	/*
	 * Remap the Octeon BAR 2 above all 32 bit devices
	 * (0x8000000000ul). This is done here so it is remapped
	 * before the readl()'s below. We don't want BAR2 overlapping
	 * with BAR0/BAR1 during these reads.
	 */
	octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0);
	octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80);

	/* Disable the BAR1 movable mappings */
	for (index = 0; index < 32; index++)
		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
		/* Remap the Octeon BAR 0 to 0-2GB */
		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);

		/*
		 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
		 * BAR 1 hole).
		 */
		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);

		/* Devices go after BAR1 */
		octeon_pci_mem_resource.start =
			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
			(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
		octeon_pci_mem_resource.end =
			octeon_pci_mem_resource.start + (1ul << 30);
	} else {
		/* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);

		/* Remap the Octeon BAR 1 to map 0-128MB */
		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);

		/* Devices go after BAR0 */
		octeon_pci_mem_resource.start =
			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
			(4ul << 10);
		octeon_pci_mem_resource.end =
			octeon_pci_mem_resource.start + (1ul << 30);
	}

	register_pci_controller(&octeon_pci_controller);

	/*
	 * Clear any errors that might be pending from before the bus
	 * was setup properly.
	 */
	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
	return 0;
}

arch_initcall(octeon_pci_setup);