Diffstat (limited to 'arch/ppc/syslib/mv64x60.c')
-rw-r--r--   arch/ppc/syslib/mv64x60.c   2485
1 file changed, 0 insertions, 2485 deletions
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
deleted file mode 100644
index 418f3053de52..000000000000
--- a/arch/ppc/syslib/mv64x60.c
+++ /dev/null
@@ -1,2485 +0,0 @@
1/*
2 * Common routines for the Marvell/Galileo Discovery line of host bridges
3 * (gt64260, mv64360, mv64460, ...).
4 *
5 * Author: Mark A. Greer <mgreer@mvista.com>
6 *
7 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
8 * the terms of the GNU General Public License version 2. This program
9 * is licensed "as is" without any warranty of any kind, whether express
10 * or implied.
11 */
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/pci.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/mutex.h>
18#include <linux/string.h>
19#include <linux/spinlock.h>
20#include <linux/mv643xx.h>
21#include <linux/platform_device.h>
22
23#include <asm/byteorder.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/uaccess.h>
27#include <asm/machdep.h>
28#include <asm/pci-bridge.h>
29#include <asm/delay.h>
30#include <asm/mv64x60.h>
31
32
33u8 mv64x60_pci_exclude_bridge = 1;
34DEFINE_SPINLOCK(mv64x60_lock);
35
36static phys_addr_t mv64x60_bridge_pbase;
37static void __iomem *mv64x60_bridge_vbase;
38static u32 mv64x60_bridge_type = MV64x60_TYPE_INVALID;
39static u32 mv64x60_bridge_rev;
40#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
41static struct pci_controller sysfs_hose_a;
42#endif
43
44static u32 gt64260_translate_size(u32 base, u32 size, u32 num_bits);
45static u32 gt64260_untranslate_size(u32 base, u32 size, u32 num_bits);
46static void gt64260_set_pci2mem_window(struct pci_controller *hose, u32 bus,
47 u32 window, u32 base);
48static void gt64260_set_pci2regs_window(struct mv64x60_handle *bh,
49 struct pci_controller *hose, u32 bus, u32 base);
50static u32 gt64260_is_enabled_32bit(struct mv64x60_handle *bh, u32 window);
51static void gt64260_enable_window_32bit(struct mv64x60_handle *bh, u32 window);
52static void gt64260_disable_window_32bit(struct mv64x60_handle *bh, u32 window);
53static void gt64260_enable_window_64bit(struct mv64x60_handle *bh, u32 window);
54static void gt64260_disable_window_64bit(struct mv64x60_handle *bh, u32 window);
55static void gt64260_disable_all_windows(struct mv64x60_handle *bh,
56 struct mv64x60_setup_info *si);
57static void gt64260a_chip_specific_init(struct mv64x60_handle *bh,
58 struct mv64x60_setup_info *si);
59static void gt64260b_chip_specific_init(struct mv64x60_handle *bh,
60 struct mv64x60_setup_info *si);
61
62static u32 mv64360_translate_size(u32 base, u32 size, u32 num_bits);
63static u32 mv64360_untranslate_size(u32 base, u32 size, u32 num_bits);
64static void mv64360_set_pci2mem_window(struct pci_controller *hose, u32 bus,
65 u32 window, u32 base);
66static void mv64360_set_pci2regs_window(struct mv64x60_handle *bh,
67 struct pci_controller *hose, u32 bus, u32 base);
68static u32 mv64360_is_enabled_32bit(struct mv64x60_handle *bh, u32 window);
69static void mv64360_enable_window_32bit(struct mv64x60_handle *bh, u32 window);
70static void mv64360_disable_window_32bit(struct mv64x60_handle *bh, u32 window);
71static void mv64360_enable_window_64bit(struct mv64x60_handle *bh, u32 window);
72static void mv64360_disable_window_64bit(struct mv64x60_handle *bh, u32 window);
73static void mv64360_disable_all_windows(struct mv64x60_handle *bh,
74 struct mv64x60_setup_info *si);
75static void mv64360_config_io2mem_windows(struct mv64x60_handle *bh,
76 struct mv64x60_setup_info *si,
77 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
78static void mv64360_set_mpsc2regs_window(struct mv64x60_handle *bh, u32 base);
79static void mv64360_chip_specific_init(struct mv64x60_handle *bh,
80 struct mv64x60_setup_info *si);
81static void mv64460_chip_specific_init(struct mv64x60_handle *bh,
82 struct mv64x60_setup_info *si);
83
84
85/*
86 * Define tables that have the chip-specific info for each type of
87 * Marvell bridge chip.
88 */
89static struct mv64x60_chip_info gt64260a_ci __initdata = { /* GT64260A */
90 .translate_size = gt64260_translate_size,
91 .untranslate_size = gt64260_untranslate_size,
92 .set_pci2mem_window = gt64260_set_pci2mem_window,
93 .set_pci2regs_window = gt64260_set_pci2regs_window,
94 .is_enabled_32bit = gt64260_is_enabled_32bit,
95 .enable_window_32bit = gt64260_enable_window_32bit,
96 .disable_window_32bit = gt64260_disable_window_32bit,
97 .enable_window_64bit = gt64260_enable_window_64bit,
98 .disable_window_64bit = gt64260_disable_window_64bit,
99 .disable_all_windows = gt64260_disable_all_windows,
100 .chip_specific_init = gt64260a_chip_specific_init,
101 .window_tab_32bit = gt64260_32bit_windows,
102 .window_tab_64bit = gt64260_64bit_windows,
103};
104
105static struct mv64x60_chip_info gt64260b_ci __initdata = { /* GT64260B */
106 .translate_size = gt64260_translate_size,
107 .untranslate_size = gt64260_untranslate_size,
108 .set_pci2mem_window = gt64260_set_pci2mem_window,
109 .set_pci2regs_window = gt64260_set_pci2regs_window,
110 .is_enabled_32bit = gt64260_is_enabled_32bit,
111 .enable_window_32bit = gt64260_enable_window_32bit,
112 .disable_window_32bit = gt64260_disable_window_32bit,
113 .enable_window_64bit = gt64260_enable_window_64bit,
114 .disable_window_64bit = gt64260_disable_window_64bit,
115 .disable_all_windows = gt64260_disable_all_windows,
116 .chip_specific_init = gt64260b_chip_specific_init,
117 .window_tab_32bit = gt64260_32bit_windows,
118 .window_tab_64bit = gt64260_64bit_windows,
119};
120
121static struct mv64x60_chip_info mv64360_ci __initdata = { /* MV64360 */
122 .translate_size = mv64360_translate_size,
123 .untranslate_size = mv64360_untranslate_size,
124 .set_pci2mem_window = mv64360_set_pci2mem_window,
125 .set_pci2regs_window = mv64360_set_pci2regs_window,
126 .is_enabled_32bit = mv64360_is_enabled_32bit,
127 .enable_window_32bit = mv64360_enable_window_32bit,
128 .disable_window_32bit = mv64360_disable_window_32bit,
129 .enable_window_64bit = mv64360_enable_window_64bit,
130 .disable_window_64bit = mv64360_disable_window_64bit,
131 .disable_all_windows = mv64360_disable_all_windows,
132 .config_io2mem_windows = mv64360_config_io2mem_windows,
133 .set_mpsc2regs_window = mv64360_set_mpsc2regs_window,
134 .chip_specific_init = mv64360_chip_specific_init,
135 .window_tab_32bit = mv64360_32bit_windows,
136 .window_tab_64bit = mv64360_64bit_windows,
137};
138
139static struct mv64x60_chip_info mv64460_ci __initdata = { /* MV64460 */
140 .translate_size = mv64360_translate_size,
141 .untranslate_size = mv64360_untranslate_size,
142 .set_pci2mem_window = mv64360_set_pci2mem_window,
143 .set_pci2regs_window = mv64360_set_pci2regs_window,
144 .is_enabled_32bit = mv64360_is_enabled_32bit,
145 .enable_window_32bit = mv64360_enable_window_32bit,
146 .disable_window_32bit = mv64360_disable_window_32bit,
147 .enable_window_64bit = mv64360_enable_window_64bit,
148 .disable_window_64bit = mv64360_disable_window_64bit,
149 .disable_all_windows = mv64360_disable_all_windows,
150 .config_io2mem_windows = mv64360_config_io2mem_windows,
151 .set_mpsc2regs_window = mv64360_set_mpsc2regs_window,
152 .chip_specific_init = mv64460_chip_specific_init,
153 .window_tab_32bit = mv64360_32bit_windows,
154 .window_tab_64bit = mv64360_64bit_windows,
155};
156
157/*
158 *****************************************************************************
159 *
160 * Platform Device Definitions
161 *
162 *****************************************************************************
163 */
164#ifdef CONFIG_SERIAL_MPSC
165static struct mpsc_shared_pdata mv64x60_mpsc_shared_pdata = {
166 .mrr_val = 0x3ffffe38,
167 .rcrr_val = 0,
168 .tcrr_val = 0,
169 .intr_cause_val = 0,
170 .intr_mask_val = 0,
171};
172
173static struct resource mv64x60_mpsc_shared_resources[] = {
174 /* Do not change the order of the IORESOURCE_MEM resources */
175 [0] = {
176 .name = "mpsc routing base",
177 .start = MV64x60_MPSC_ROUTING_OFFSET,
178 .end = MV64x60_MPSC_ROUTING_OFFSET +
179 MPSC_ROUTING_REG_BLOCK_SIZE - 1,
180 .flags = IORESOURCE_MEM,
181 },
182 [1] = {
183 .name = "sdma intr base",
184 .start = MV64x60_SDMA_INTR_OFFSET,
185 .end = MV64x60_SDMA_INTR_OFFSET +
186 MPSC_SDMA_INTR_REG_BLOCK_SIZE - 1,
187 .flags = IORESOURCE_MEM,
188 },
189};
190
191static struct platform_device mpsc_shared_device = { /* Shared device */
192 .name = MPSC_SHARED_NAME,
193 .id = 0,
194 .num_resources = ARRAY_SIZE(mv64x60_mpsc_shared_resources),
195 .resource = mv64x60_mpsc_shared_resources,
196 .dev = {
197 .platform_data = &mv64x60_mpsc_shared_pdata,
198 },
199};
200
201static struct mpsc_pdata mv64x60_mpsc0_pdata = {
202 .mirror_regs = 0,
203 .cache_mgmt = 0,
204 .max_idle = 0,
205 .default_baud = 9600,
206 .default_bits = 8,
207 .default_parity = 'n',
208 .default_flow = 'n',
209 .chr_1_val = 0x00000000,
210 .chr_2_val = 0x00000000,
211 .chr_10_val = 0x00000003,
212 .mpcr_val = 0,
213 .bcr_val = 0,
214 .brg_can_tune = 0,
215 .brg_clk_src = 8, /* Default to TCLK */
216 .brg_clk_freq = 100000000, /* Default to 100 MHz */
217};
218
219static struct resource mv64x60_mpsc0_resources[] = {
220 /* Do not change the order of the IORESOURCE_MEM resources */
221 [0] = {
222 .name = "mpsc 0 base",
223 .start = MV64x60_MPSC_0_OFFSET,
224 .end = MV64x60_MPSC_0_OFFSET + MPSC_REG_BLOCK_SIZE - 1,
225 .flags = IORESOURCE_MEM,
226 },
227 [1] = {
228 .name = "sdma 0 base",
229 .start = MV64x60_SDMA_0_OFFSET,
230 .end = MV64x60_SDMA_0_OFFSET + MPSC_SDMA_REG_BLOCK_SIZE - 1,
231 .flags = IORESOURCE_MEM,
232 },
233 [2] = {
234 .name = "brg 0 base",
235 .start = MV64x60_BRG_0_OFFSET,
236 .end = MV64x60_BRG_0_OFFSET + MPSC_BRG_REG_BLOCK_SIZE - 1,
237 .flags = IORESOURCE_MEM,
238 },
239 [3] = {
240 .name = "sdma 0 irq",
241 .start = MV64x60_IRQ_SDMA_0,
242 .end = MV64x60_IRQ_SDMA_0,
243 .flags = IORESOURCE_IRQ,
244 },
245};
246
247static struct platform_device mpsc0_device = {
248 .name = MPSC_CTLR_NAME,
249 .id = 0,
250 .num_resources = ARRAY_SIZE(mv64x60_mpsc0_resources),
251 .resource = mv64x60_mpsc0_resources,
252 .dev = {
253 .platform_data = &mv64x60_mpsc0_pdata,
254 },
255};
256
257static struct mpsc_pdata mv64x60_mpsc1_pdata = {
258 .mirror_regs = 0,
259 .cache_mgmt = 0,
260 .max_idle = 0,
261 .default_baud = 9600,
262 .default_bits = 8,
263 .default_parity = 'n',
264 .default_flow = 'n',
265 .chr_1_val = 0x00000000,
267 .chr_2_val = 0x00000000,
268 .chr_10_val = 0x00000003,
269 .mpcr_val = 0,
270 .bcr_val = 0,
271 .brg_can_tune = 0,
272 .brg_clk_src = 8, /* Default to TCLK */
273 .brg_clk_freq = 100000000, /* Default to 100 MHz */
274};
275
276static struct resource mv64x60_mpsc1_resources[] = {
277 /* Do not change the order of the IORESOURCE_MEM resources */
278 [0] = {
279 .name = "mpsc 1 base",
280 .start = MV64x60_MPSC_1_OFFSET,
281 .end = MV64x60_MPSC_1_OFFSET + MPSC_REG_BLOCK_SIZE - 1,
282 .flags = IORESOURCE_MEM,
283 },
284 [1] = {
285 .name = "sdma 1 base",
286 .start = MV64x60_SDMA_1_OFFSET,
287 .end = MV64x60_SDMA_1_OFFSET + MPSC_SDMA_REG_BLOCK_SIZE - 1,
288 .flags = IORESOURCE_MEM,
289 },
290 [2] = {
291 .name = "brg 1 base",
292 .start = MV64x60_BRG_1_OFFSET,
293 .end = MV64x60_BRG_1_OFFSET + MPSC_BRG_REG_BLOCK_SIZE - 1,
294 .flags = IORESOURCE_MEM,
295 },
296 [3] = {
297 .name = "sdma 1 irq",
298 .start = MV64360_IRQ_SDMA_1,
299 .end = MV64360_IRQ_SDMA_1,
300 .flags = IORESOURCE_IRQ,
301 },
302};
303
304static struct platform_device mpsc1_device = {
305 .name = MPSC_CTLR_NAME,
306 .id = 1,
307 .num_resources = ARRAY_SIZE(mv64x60_mpsc1_resources),
308 .resource = mv64x60_mpsc1_resources,
309 .dev = {
310 .platform_data = &mv64x60_mpsc1_pdata,
311 },
312};
313#endif
314
315#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
316static struct resource mv64x60_eth_shared_resources[] = {
317 [0] = {
318 .name = "ethernet shared base",
319 .start = MV643XX_ETH_SHARED_REGS,
320 .end = MV643XX_ETH_SHARED_REGS +
321 MV643XX_ETH_SHARED_REGS_SIZE - 1,
322 .flags = IORESOURCE_MEM,
323 },
324};
325
326static struct platform_device mv64x60_eth_shared_device = {
327 .name = MV643XX_ETH_SHARED_NAME,
328 .id = 0,
329 .num_resources = ARRAY_SIZE(mv64x60_eth_shared_resources),
330 .resource = mv64x60_eth_shared_resources,
331};
332
333#ifdef CONFIG_MV643XX_ETH_0
334static struct resource mv64x60_eth0_resources[] = {
335 [0] = {
336 .name = "eth0 irq",
337 .start = MV64x60_IRQ_ETH_0,
338 .end = MV64x60_IRQ_ETH_0,
339 .flags = IORESOURCE_IRQ,
340 },
341};
342
343static struct mv643xx_eth_platform_data eth0_pd = {
344 .shared = &mv64x60_eth_shared_device,
345 .port_number = 0,
346};
347
348static struct platform_device eth0_device = {
349 .name = MV643XX_ETH_NAME,
350 .id = 0,
351 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
352 .resource = mv64x60_eth0_resources,
353 .dev = {
354 .platform_data = &eth0_pd,
355 },
356};
357#endif
358
359#ifdef CONFIG_MV643XX_ETH_1
360static struct resource mv64x60_eth1_resources[] = {
361 [0] = {
362 .name = "eth1 irq",
363 .start = MV64x60_IRQ_ETH_1,
364 .end = MV64x60_IRQ_ETH_1,
365 .flags = IORESOURCE_IRQ,
366 },
367};
368
369static struct mv643xx_eth_platform_data eth1_pd = {
370 .shared = &mv64x60_eth_shared_device,
371 .port_number = 1,
372};
373
374static struct platform_device eth1_device = {
375 .name = MV643XX_ETH_NAME,
376 .id = 1,
377 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
378 .resource = mv64x60_eth1_resources,
379 .dev = {
380 .platform_data = &eth1_pd,
381 },
382};
383#endif
384
385#ifdef CONFIG_MV643XX_ETH_2
386static struct resource mv64x60_eth2_resources[] = {
387 [0] = {
388 .name = "eth2 irq",
389 .start = MV64x60_IRQ_ETH_2,
390 .end = MV64x60_IRQ_ETH_2,
391 .flags = IORESOURCE_IRQ,
392 },
393};
394
395static struct mv643xx_eth_platform_data eth2_pd = {
396 .shared = &mv64x60_eth_shared_device,
397 .port_number = 2,
398};
399
400static struct platform_device eth2_device = {
401 .name = MV643XX_ETH_NAME,
402 .id = 2,
403 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
404 .resource = mv64x60_eth2_resources,
405 .dev = {
406 .platform_data = &eth2_pd,
407 },
408};
409#endif
410#endif
411
412#ifdef CONFIG_I2C_MV64XXX
413static struct mv64xxx_i2c_pdata mv64xxx_i2c_pdata = {
414 .freq_m = 8,
415 .freq_n = 3,
416 .timeout = 1000, /* Default timeout of 1 second */
417};
418
419static struct resource mv64xxx_i2c_resources[] = {
420 /* Do not change the order of the IORESOURCE_MEM resources */
421 [0] = {
422 .name = "mv64xxx i2c base",
423 .start = MV64XXX_I2C_OFFSET,
424 .end = MV64XXX_I2C_OFFSET + MV64XXX_I2C_REG_BLOCK_SIZE - 1,
425 .flags = IORESOURCE_MEM,
426 },
427 [1] = {
428 .name = "mv64xxx i2c irq",
429 .start = MV64x60_IRQ_I2C,
430 .end = MV64x60_IRQ_I2C,
431 .flags = IORESOURCE_IRQ,
432 },
433};
434
435static struct platform_device i2c_device = {
436 .name = MV64XXX_I2C_CTLR_NAME,
437 .id = 0,
438 .num_resources = ARRAY_SIZE(mv64xxx_i2c_resources),
439 .resource = mv64xxx_i2c_resources,
440 .dev = {
441 .platform_data = &mv64xxx_i2c_pdata,
442 },
443};
444#endif
445
446#ifdef CONFIG_MV64X60_WDT
447static struct mv64x60_wdt_pdata mv64x60_wdt_pdata = {
448 .timeout = 10, /* default watchdog expiry in seconds */
449 .bus_clk = 133, /* default bus clock in MHz */
450};
451
452static struct resource mv64x60_wdt_resources[] = {
453 [0] = {
454 .name = "mv64x60 wdt base",
455 .start = MV64x60_WDT_WDC,
456 .end = MV64x60_WDT_WDC + 8 - 1, /* two 32-bit registers */
457 .flags = IORESOURCE_MEM,
458 },
459};
460
461static struct platform_device wdt_device = {
462 .name = MV64x60_WDT_NAME,
463 .id = 0,
464 .num_resources = ARRAY_SIZE(mv64x60_wdt_resources),
465 .resource = mv64x60_wdt_resources,
466 .dev = {
467 .platform_data = &mv64x60_wdt_pdata,
468 },
469};
470#endif
471
472#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
473static struct mv64xxx_pdata mv64xxx_pdata = {
474 .hs_reg_valid = 0,
475};
476
477static struct platform_device mv64xxx_device = { /* general mv64x60 stuff */
478 .name = MV64XXX_DEV_NAME,
479 .id = 0,
480 .dev = {
481 .platform_data = &mv64xxx_pdata,
482 },
483};
484#endif
485
486static struct platform_device *mv64x60_pd_devs[] __initdata = {
487#ifdef CONFIG_SERIAL_MPSC
488 &mpsc_shared_device,
489 &mpsc0_device,
490 &mpsc1_device,
491#endif
492#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
493 &mv64x60_eth_shared_device,
494#endif
495#ifdef CONFIG_MV643XX_ETH_0
496 &eth0_device,
497#endif
498#ifdef CONFIG_MV643XX_ETH_1
499 &eth1_device,
500#endif
501#ifdef CONFIG_MV643XX_ETH_2
502 &eth2_device,
503#endif
504#ifdef CONFIG_I2C_MV64XXX
505 &i2c_device,
506#endif
507#ifdef CONFIG_MV64X60_WDT
508 &wdt_device,
509#endif
510#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
511 &mv64xxx_device,
512#endif
513};
514
515/*
516 *****************************************************************************
517 *
518 * Bridge Initialization Routines
519 *
520 *****************************************************************************
521 */
522/*
523 * mv64x60_init()
524 *
525 * Initialize the bridge based on the settings passed in via 'si'. The bridge
526 * handle, 'bh', will be set so that it can be used to make subsequent
527 * calls to routines in this file.
528 */
529int __init
530mv64x60_init(struct mv64x60_handle *bh, struct mv64x60_setup_info *si)
531{
532 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
533
534 if (ppc_md.progress)
535 ppc_md.progress("mv64x60 initialization", 0x0);
536
537 spin_lock_init(&mv64x60_lock);
538 mv64x60_early_init(bh, si);
539
540 if (mv64x60_get_type(bh) || mv64x60_setup_for_chip(bh)) {
541 iounmap(bh->v_base);
542 bh->v_base = 0;
543 if (ppc_md.progress)
544 ppc_md.progress("mv64x60_init: Can't determine chip",0);
545 return -1;
546 }
547
548 bh->ci->disable_all_windows(bh, si);
549 mv64x60_get_mem_windows(bh, mem_windows);
550 mv64x60_config_cpu2mem_windows(bh, si, mem_windows);
551
552 if (bh->ci->config_io2mem_windows)
553 bh->ci->config_io2mem_windows(bh, si, mem_windows);
554 if (bh->ci->set_mpsc2regs_window)
555 bh->ci->set_mpsc2regs_window(bh, si->phys_reg_base);
556
557 if (si->pci_1.enable_bus) {
558 bh->io_base_b = (u32)ioremap(si->pci_1.pci_io.cpu_base,
559 si->pci_1.pci_io.size);
560 isa_io_base = bh->io_base_b;
561 }
562
563 if (si->pci_0.enable_bus) {
564 bh->io_base_a = (u32)ioremap(si->pci_0.pci_io.cpu_base,
565 si->pci_0.pci_io.size);
566 isa_io_base = bh->io_base_a;
567
568 mv64x60_alloc_hose(bh, MV64x60_PCI0_CONFIG_ADDR,
569 MV64x60_PCI0_CONFIG_DATA, &bh->hose_a);
570 mv64x60_config_resources(bh->hose_a, &si->pci_0, bh->io_base_a);
571 mv64x60_config_pci_params(bh->hose_a, &si->pci_0);
572
573 mv64x60_config_cpu2pci_windows(bh, &si->pci_0, 0);
574 mv64x60_config_pci2mem_windows(bh, bh->hose_a, &si->pci_0, 0,
575 mem_windows);
576 bh->ci->set_pci2regs_window(bh, bh->hose_a, 0,
577 si->phys_reg_base);
578 }
579
580 if (si->pci_1.enable_bus) {
581 mv64x60_alloc_hose(bh, MV64x60_PCI1_CONFIG_ADDR,
582 MV64x60_PCI1_CONFIG_DATA, &bh->hose_b);
583 mv64x60_config_resources(bh->hose_b, &si->pci_1, bh->io_base_b);
584 mv64x60_config_pci_params(bh->hose_b, &si->pci_1);
585
586 mv64x60_config_cpu2pci_windows(bh, &si->pci_1, 1);
587 mv64x60_config_pci2mem_windows(bh, bh->hose_b, &si->pci_1, 1,
588 mem_windows);
589 bh->ci->set_pci2regs_window(bh, bh->hose_b, 1,
590 si->phys_reg_base);
591 }
592
593 bh->ci->chip_specific_init(bh, si);
594 mv64x60_pd_fixup(bh, mv64x60_pd_devs, ARRAY_SIZE(mv64x60_pd_devs));
595
596 return 0;
597}
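/*
 * Illustrative sketch of how board-support code typically drives the init
 * sequence above.  The values and names (board_regs_phys, etc.) are
 * placeholders, not taken from any real board port:
 *
 *	static struct mv64x60_handle bh;
 *	static struct mv64x60_setup_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.phys_reg_base = board_regs_phys;	// board's bridge register base
 *	si.pci_0.enable_bus = 1;
 *	si.pci_0.pci_io.cpu_base = board_pci0_io_phys;
 *	si.pci_0.pci_io.size = board_pci0_io_size;
 *	// ...fill in pci_mem ranges, prot/snoop options, etc. ...
 *	if (mv64x60_init(&bh, &si))
 *		printk(KERN_ERR "mv64x60 bridge init failed\n");
 */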
598
599/*
600 * mv64x60_early_init()
601 *
602 * Do some bridge work that must take place before we start messing with
603 * the bridge for real.
604 */
605void __init
606mv64x60_early_init(struct mv64x60_handle *bh, struct mv64x60_setup_info *si)
607{
608 struct pci_controller hose_a, hose_b;
609
610 memset(bh, 0, sizeof(*bh));
611
612 bh->p_base = si->phys_reg_base;
613 bh->v_base = ioremap(bh->p_base, MV64x60_INTERNAL_SPACE_SIZE);
614
615 mv64x60_bridge_pbase = bh->p_base;
616 mv64x60_bridge_vbase = bh->v_base;
617
618 /* Assuming pci mode [reserved] bits 4:5 on 64260 are 0 */
619 bh->pci_mode_a = mv64x60_read(bh, MV64x60_PCI0_MODE) &
620 MV64x60_PCIMODE_MASK;
621 bh->pci_mode_b = mv64x60_read(bh, MV64x60_PCI1_MODE) &
622 MV64x60_PCIMODE_MASK;
623
624 /* Need temporary hose structs to call mv64x60_set_bus() */
625 memset(&hose_a, 0, sizeof(hose_a));
626 memset(&hose_b, 0, sizeof(hose_b));
627 setup_indirect_pci_nomap(&hose_a, bh->v_base + MV64x60_PCI0_CONFIG_ADDR,
628 bh->v_base + MV64x60_PCI0_CONFIG_DATA);
629 setup_indirect_pci_nomap(&hose_b, bh->v_base + MV64x60_PCI1_CONFIG_ADDR,
630 bh->v_base + MV64x60_PCI1_CONFIG_DATA);
631 bh->hose_a = &hose_a;
632 bh->hose_b = &hose_b;
633
634#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
635 /* Save a copy of hose_a for sysfs functions -- hack */
636 memcpy(&sysfs_hose_a, &hose_a, sizeof(hose_a));
637#endif
638
639 mv64x60_set_bus(bh, 0, 0);
640 mv64x60_set_bus(bh, 1, 0);
641
642 bh->hose_a = NULL;
643 bh->hose_b = NULL;
644
645 /* Clear bit 0 of PCI addr decode control so PCI->CPU remap 1:1 */
646 mv64x60_clr_bits(bh, MV64x60_PCI0_PCI_DECODE_CNTL, 0x00000001);
647 mv64x60_clr_bits(bh, MV64x60_PCI1_PCI_DECODE_CNTL, 0x00000001);
648
649 /* Bit 12 MUST be 0; set bit 27--don't auto-update cpu remap regs */
650 mv64x60_clr_bits(bh, MV64x60_CPU_CONFIG, (1<<12));
651 mv64x60_set_bits(bh, MV64x60_CPU_CONFIG, (1<<27));
652
653 mv64x60_set_bits(bh, MV64x60_PCI0_TO_RETRY, 0xffff);
654 mv64x60_set_bits(bh, MV64x60_PCI1_TO_RETRY, 0xffff);
655}
656
657/*
658 *****************************************************************************
659 *
660 * Window Config Routines
661 *
662 *****************************************************************************
663 */
664/*
665 * mv64x60_get_32bit_window()
666 *
667 * Determine the base address and size of a 32-bit window on the bridge.
668 */
669void __init
670mv64x60_get_32bit_window(struct mv64x60_handle *bh, u32 window,
671 u32 *base, u32 *size)
672{
673 u32 val, base_reg, size_reg, base_bits, size_bits;
674 u32 (*get_from_field)(u32 val, u32 num_bits);
675
676 base_reg = bh->ci->window_tab_32bit[window].base_reg;
677
678 if (base_reg != 0) {
679 size_reg = bh->ci->window_tab_32bit[window].size_reg;
680 base_bits = bh->ci->window_tab_32bit[window].base_bits;
681 size_bits = bh->ci->window_tab_32bit[window].size_bits;
682 get_from_field= bh->ci->window_tab_32bit[window].get_from_field;
683
684 val = mv64x60_read(bh, base_reg);
685 *base = get_from_field(val, base_bits);
686
687 if (size_reg != 0) {
688 val = mv64x60_read(bh, size_reg);
689 val = get_from_field(val, size_bits);
690 *size = bh->ci->untranslate_size(*base, val, size_bits);
691 } else
692 *size = 0;
693 } else {
694 *base = 0;
695 *size = 0;
696 }
697
698 pr_debug("get 32bit window: %d, base: 0x%x, size: 0x%x\n",
699 window, *base, *size);
700}
701
702/*
703 * mv64x60_set_32bit_window()
704 *
705 * Set the base address and size of a 32-bit window on the bridge.
706 */
707void __init
708mv64x60_set_32bit_window(struct mv64x60_handle *bh, u32 window,
709 u32 base, u32 size, u32 other_bits)
710{
711 u32 val, base_reg, size_reg, base_bits, size_bits;
712 u32 (*map_to_field)(u32 val, u32 num_bits);
713
714 pr_debug("set 32bit window: %d, base: 0x%x, size: 0x%x, other: 0x%x\n",
715 window, base, size, other_bits);
716
717 base_reg = bh->ci->window_tab_32bit[window].base_reg;
718
719 if (base_reg != 0) {
720 size_reg = bh->ci->window_tab_32bit[window].size_reg;
721 base_bits = bh->ci->window_tab_32bit[window].base_bits;
722 size_bits = bh->ci->window_tab_32bit[window].size_bits;
723 map_to_field = bh->ci->window_tab_32bit[window].map_to_field;
724
725 val = map_to_field(base, base_bits) | other_bits;
726 mv64x60_write(bh, base_reg, val);
727
728 if (size_reg != 0) {
729 val = bh->ci->translate_size(base, size, size_bits);
730 val = map_to_field(val, size_bits);
731 mv64x60_write(bh, size_reg, val);
732 }
733
734 (void)mv64x60_read(bh, base_reg); /* Flush FIFO */
735 }
736}
737
738/*
739 * mv64x60_get_64bit_window()
740 *
741 * Determine the base address and size of a 64-bit window on the bridge.
742 */
743void __init
744mv64x60_get_64bit_window(struct mv64x60_handle *bh, u32 window,
745 u32 *base_hi, u32 *base_lo, u32 *size)
746{
747 u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
748 u32 (*get_from_field)(u32 val, u32 num_bits);
749
750 base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
751
752 if (base_lo_reg != 0) {
753 size_reg = bh->ci->window_tab_64bit[window].size_reg;
754 base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
755 size_bits = bh->ci->window_tab_64bit[window].size_bits;
756 get_from_field= bh->ci->window_tab_64bit[window].get_from_field;
757
758 *base_hi = mv64x60_read(bh,
759 bh->ci->window_tab_64bit[window].base_hi_reg);
760
761 val = mv64x60_read(bh, base_lo_reg);
762 *base_lo = get_from_field(val, base_lo_bits);
763
764 if (size_reg != 0) {
765 val = mv64x60_read(bh, size_reg);
766 val = get_from_field(val, size_bits);
767 *size = bh->ci->untranslate_size(*base_lo, val,
768 size_bits);
769 } else
770 *size = 0;
771 } else {
772 *base_hi = 0;
773 *base_lo = 0;
774 *size = 0;
775 }
776
777 pr_debug("get 64bit window: %d, base hi: 0x%x, base lo: 0x%x, "
778 "size: 0x%x\n", window, *base_hi, *base_lo, *size);
779}
780
781/*
782 * mv64x60_set_64bit_window()
783 *
784 * Set the base address and size of a 64-bit window on the bridge.
785 */
786void __init
787mv64x60_set_64bit_window(struct mv64x60_handle *bh, u32 window,
788 u32 base_hi, u32 base_lo, u32 size, u32 other_bits)
789{
790 u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
791 u32 (*map_to_field)(u32 val, u32 num_bits);
792
793 pr_debug("set 64bit window: %d, base hi: 0x%x, base lo: 0x%x, "
794 "size: 0x%x, other: 0x%x\n",
795 window, base_hi, base_lo, size, other_bits);
796
797 base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
798
799 if (base_lo_reg != 0) {
800 size_reg = bh->ci->window_tab_64bit[window].size_reg;
801 base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
802 size_bits = bh->ci->window_tab_64bit[window].size_bits;
803 map_to_field = bh->ci->window_tab_64bit[window].map_to_field;
804
805 mv64x60_write(bh, bh->ci->window_tab_64bit[window].base_hi_reg,
806 base_hi);
807
808 val = map_to_field(base_lo, base_lo_bits) | other_bits;
809 mv64x60_write(bh, base_lo_reg, val);
810
811 if (size_reg != 0) {
812 val = bh->ci->translate_size(base_lo, size, size_bits);
813 val = map_to_field(val, size_bits);
814 mv64x60_write(bh, size_reg, val);
815 }
816
817 (void)mv64x60_read(bh, base_lo_reg); /* Flush FIFO */
818 }
819}
820
821/*
822 * mv64x60_mask()
823 *
824 * Take the high-order 'num_bits' of 'val' & mask off low bits.
825 */
826u32 __init
827mv64x60_mask(u32 val, u32 num_bits)
828{
829 return val & (0xffffffff << (32 - num_bits));
830}
831
832/*
833 * mv64x60_shift_left()
834 *
835 * Take the low-order 'num_bits' of 'val', shift left to align at bit 31 (MSB).
836 */
837u32 __init
838mv64x60_shift_left(u32 val, u32 num_bits)
839{
840 return val << (32 - num_bits);
841}
842
843/*
844 * mv64x60_shift_right()
845 *
846 * Take the high-order 'num_bits' of 'val', shift right to align at bit 0 (LSB).
847 */
848u32 __init
849mv64x60_shift_right(u32 val, u32 num_bits)
850{
851 return val >> (32 - num_bits);
852}
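/*
 * Worked examples (illustrative) of the three field helpers above, which
 * are the routines the window tables' map_to_field/get_from_field pointers
 * typically reference.  With num_bits = 20:
 *
 *	mv64x60_mask(0x1234abcd, 20)        == 0x1234a000  (low 12 bits cleared)
 *	mv64x60_shift_left(0x00012345, 20)  == 0x12345000  (field -> addr bits)
 *	mv64x60_shift_right(0x12345000, 20) == 0x00012345  (addr bits -> field)
 */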
853
854/*
855 *****************************************************************************
856 *
857 * Chip Identification Routines
858 *
859 *****************************************************************************
860 */
861/*
862 * mv64x60_get_type()
863 *
864 * Determine the type of bridge chip we have.
865 */
866int __init
867mv64x60_get_type(struct mv64x60_handle *bh)
868{
869 struct pci_controller hose;
870 u16 val;
871 u8 save_exclude;
872
873 memset(&hose, 0, sizeof(hose));
874 setup_indirect_pci_nomap(&hose, bh->v_base + MV64x60_PCI0_CONFIG_ADDR,
875 bh->v_base + MV64x60_PCI0_CONFIG_DATA);
876
877 save_exclude = mv64x60_pci_exclude_bridge;
878 mv64x60_pci_exclude_bridge = 0;
879 /* Sanity check of bridge's Vendor ID */
880 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, &val);
881
882 if (val != PCI_VENDOR_ID_MARVELL) {
883 mv64x60_pci_exclude_bridge = save_exclude;
884 return -1;
885 }
886
887 /* Get the revision of the chip */
888 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_CLASS_REVISION,
889 &val);
890 bh->rev = (u32)(val & 0xff);
891
892 /* Figure out the type of Marvell bridge it is */
893 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_DEVICE_ID, &val);
894 mv64x60_pci_exclude_bridge = save_exclude;
895
896 switch (val) {
897 case PCI_DEVICE_ID_MARVELL_GT64260:
898 switch (bh->rev) {
899 case GT64260_REV_A:
900 bh->type = MV64x60_TYPE_GT64260A;
901 break;
902
903 default:
904 printk(KERN_WARNING "Unsupported GT64260 rev %04x\n",
905 bh->rev);
906 /* Assume it's similar to a 'B' rev and fallthru */
907 case GT64260_REV_B:
908 bh->type = MV64x60_TYPE_GT64260B;
909 break;
910 }
911 break;
912
913 case PCI_DEVICE_ID_MARVELL_MV64360:
914 /* Marvell won't tell me how to distinguish a 64361 & 64362 */
915 bh->type = MV64x60_TYPE_MV64360;
916 break;
917
918 case PCI_DEVICE_ID_MARVELL_MV64460:
919 bh->type = MV64x60_TYPE_MV64460;
920 break;
921
922 default:
923 printk(KERN_ERR "Unknown Marvell bridge type %04x\n", val);
924 return -1;
925 }
926
927 /* Hang onto bridge type & rev for PIC code */
928 mv64x60_bridge_type = bh->type;
929 mv64x60_bridge_rev = bh->rev;
930
931 return 0;
932}
933
934/*
935 * mv64x60_setup_for_chip()
936 *
937 * Set 'bh' to use the proper set of routines for the bridge chip that we have.
938 */
939int __init
940mv64x60_setup_for_chip(struct mv64x60_handle *bh)
941{
942 int rc = 0;
943
944 /* Set up chip-specific info based on the chip/bridge type */
945 switch(bh->type) {
946 case MV64x60_TYPE_GT64260A:
947 bh->ci = &gt64260a_ci;
948 break;
949
950 case MV64x60_TYPE_GT64260B:
951 bh->ci = &gt64260b_ci;
952 break;
953
954 case MV64x60_TYPE_MV64360:
955 bh->ci = &mv64360_ci;
956 break;
957
958 case MV64x60_TYPE_MV64460:
959 bh->ci = &mv64460_ci;
960 break;
961
962 case MV64x60_TYPE_INVALID:
963 default:
964 if (ppc_md.progress)
965 ppc_md.progress("mv64x60: Unsupported bridge", 0x0);
966 printk(KERN_ERR "mv64x60: Unsupported bridge\n");
967 rc = -1;
968 }
969
970 return rc;
971}
972
973/*
974 * mv64x60_get_bridge_vbase()
975 *
976 * Return the virtual address of the bridge's registers.
977 */
978void __iomem *
979mv64x60_get_bridge_vbase(void)
980{
981 return mv64x60_bridge_vbase;
982}
983
984/*
985 * mv64x60_get_bridge_type()
986 *
987 * Return the type of bridge on the platform.
988 */
989u32
990mv64x60_get_bridge_type(void)
991{
992 return mv64x60_bridge_type;
993}
994
995/*
996 * mv64x60_get_bridge_rev()
997 *
998 * Return the revision of the bridge on the platform.
999 */
1000u32
1001mv64x60_get_bridge_rev(void)
1002{
1003 return mv64x60_bridge_rev;
1004}
1005
1006/*
1007 *****************************************************************************
1008 *
1009 * System Memory Window Related Routines
1010 *
1011 *****************************************************************************
1012 */
1013/*
1014 * mv64x60_get_mem_size()
1015 *
1016 * Calculate the amount of memory that the memory controller is set up for.
1017 * This should only be used by board-specific code if there is no other
1018 * way to determine the amount of memory in the system.
1019 */
1020u32 __init
1021mv64x60_get_mem_size(u32 bridge_base, u32 chip_type)
1022{
1023 struct mv64x60_handle bh;
1024 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
1025 u32 rc = 0;
1026
1027 memset(&bh, 0, sizeof(bh));
1028
1029 bh.type = chip_type;
1030 bh.v_base = (void *)bridge_base;
1031
1032 if (!mv64x60_setup_for_chip(&bh)) {
1033 mv64x60_get_mem_windows(&bh, mem_windows);
1034 rc = mv64x60_calc_mem_size(&bh, mem_windows);
1035 }
1036
1037 return rc;
1038}
1039
1040/*
1041 * mv64x60_get_mem_windows()
1042 *
1043 * Get the values in the memory controller & return in the 'mem_windows' array.
1044 */
1045void __init
1046mv64x60_get_mem_windows(struct mv64x60_handle *bh,
1047 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1048{
1049 u32 i, win;
1050
1051 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1052 if (bh->ci->is_enabled_32bit(bh, win))
1053 mv64x60_get_32bit_window(bh, win,
1054 &mem_windows[i][0], &mem_windows[i][1]);
1055 else {
1056 mem_windows[i][0] = 0;
1057 mem_windows[i][1] = 0;
1058 }
1059}
1060
1061/*
1062 * mv64x60_calc_mem_size()
1063 *
1064 * Using the memory controller register values in 'mem_windows', determine
1065 * how much memory it is set up for.
1066 */
1067u32 __init
1068mv64x60_calc_mem_size(struct mv64x60_handle *bh,
1069 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1070{
1071 u32 i, total = 0;
1072
1073 for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++)
1074 total += mem_windows[i][1];
1075
1076 return total;
1077}
1078
1079/*
1080 *****************************************************************************
1081 *
1082 * CPU->System MEM, PCI Config Routines
1083 *
1084 *****************************************************************************
1085 */
1086/*
1087 * mv64x60_config_cpu2mem_windows()
1088 *
1089 * Configure CPU->Memory windows on the bridge.
1090 */
1091static u32 prot_tab[] __initdata = {
1092 MV64x60_CPU_PROT_0_WIN, MV64x60_CPU_PROT_1_WIN,
1093 MV64x60_CPU_PROT_2_WIN, MV64x60_CPU_PROT_3_WIN
1094};
1095
1096static u32 cpu_snoop_tab[] __initdata = {
1097 MV64x60_CPU_SNOOP_0_WIN, MV64x60_CPU_SNOOP_1_WIN,
1098 MV64x60_CPU_SNOOP_2_WIN, MV64x60_CPU_SNOOP_3_WIN
1099};
1100
1101void __init
1102mv64x60_config_cpu2mem_windows(struct mv64x60_handle *bh,
1103 struct mv64x60_setup_info *si,
1104 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1105{
1106 u32 i, win;
1107
1108 /* Set CPU protection & snoop windows */
1109 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1110 if (bh->ci->is_enabled_32bit(bh, win)) {
1111 mv64x60_set_32bit_window(bh, prot_tab[i],
1112 mem_windows[i][0], mem_windows[i][1],
1113 si->cpu_prot_options[i]);
1114 bh->ci->enable_window_32bit(bh, prot_tab[i]);
1115
1116 if (bh->ci->window_tab_32bit[cpu_snoop_tab[i]].
1117 base_reg != 0) {
1118 mv64x60_set_32bit_window(bh, cpu_snoop_tab[i],
1119 mem_windows[i][0], mem_windows[i][1],
1120 si->cpu_snoop_options[i]);
1121 bh->ci->enable_window_32bit(bh,
1122 cpu_snoop_tab[i]);
1123 }
1124
1125 }
1126}
1127
1128/*
1129 * mv64x60_config_cpu2pci_windows()
1130 *
1131 * Configure the CPU->PCI windows for one of the PCI buses.
1132 */
1133static u32 win_tab[2][4] __initdata = {
1134 { MV64x60_CPU2PCI0_IO_WIN, MV64x60_CPU2PCI0_MEM_0_WIN,
1135 MV64x60_CPU2PCI0_MEM_1_WIN, MV64x60_CPU2PCI0_MEM_2_WIN },
1136 { MV64x60_CPU2PCI1_IO_WIN, MV64x60_CPU2PCI1_MEM_0_WIN,
1137 MV64x60_CPU2PCI1_MEM_1_WIN, MV64x60_CPU2PCI1_MEM_2_WIN },
1138};
1139
1140static u32 remap_tab[2][4] __initdata = {
1141 { MV64x60_CPU2PCI0_IO_REMAP_WIN, MV64x60_CPU2PCI0_MEM_0_REMAP_WIN,
1142 MV64x60_CPU2PCI0_MEM_1_REMAP_WIN, MV64x60_CPU2PCI0_MEM_2_REMAP_WIN },
1143 { MV64x60_CPU2PCI1_IO_REMAP_WIN, MV64x60_CPU2PCI1_MEM_0_REMAP_WIN,
1144 MV64x60_CPU2PCI1_MEM_1_REMAP_WIN, MV64x60_CPU2PCI1_MEM_2_REMAP_WIN }
1145};
1146
1147void __init
1148mv64x60_config_cpu2pci_windows(struct mv64x60_handle *bh,
1149 struct mv64x60_pci_info *pi, u32 bus)
1150{
1151 int i;
1152
1153 if (pi->pci_io.size > 0) {
1154 mv64x60_set_32bit_window(bh, win_tab[bus][0],
1155 pi->pci_io.cpu_base, pi->pci_io.size, pi->pci_io.swap);
1156 mv64x60_set_32bit_window(bh, remap_tab[bus][0],
1157 pi->pci_io.pci_base_lo, 0, 0);
1158 bh->ci->enable_window_32bit(bh, win_tab[bus][0]);
1159 } else /* Actually, the window should already be disabled */
1160 bh->ci->disable_window_32bit(bh, win_tab[bus][0]);
1161
1162 for (i=0; i<3; i++)
1163 if (pi->pci_mem[i].size > 0) {
1164 mv64x60_set_32bit_window(bh, win_tab[bus][i+1],
1165 pi->pci_mem[i].cpu_base, pi->pci_mem[i].size,
1166 pi->pci_mem[i].swap);
1167 mv64x60_set_64bit_window(bh, remap_tab[bus][i+1],
1168 pi->pci_mem[i].pci_base_hi,
1169 pi->pci_mem[i].pci_base_lo, 0, 0);
1170 bh->ci->enable_window_32bit(bh, win_tab[bus][i+1]);
1171 } else /* Actually, the window should already be disabled */
1172 bh->ci->disable_window_32bit(bh, win_tab[bus][i+1]);
1173}
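/*
 * Example (illustrative, with made-up addresses): if pci_io.cpu_base is
 * 0xa0000000, pci_io.size is 0x01000000 and pci_io.pci_base_lo is 0, the
 * window/remap pair programmed above forwards CPU accesses in the range
 * 0xa0000000-0xa0ffffff to PCI I/O addresses 0x00000000-0x00ffffff,
 * assuming the usual remap-register semantics of these bridges.
 */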
1174
1175/*
1176 *****************************************************************************
1177 *
1178 * PCI->System MEM Config Routines
1179 *
1180 *****************************************************************************
1181 */
1182/*
1183 * mv64x60_config_pci2mem_windows()
1184 *
1185 * Configure the PCI->Memory windows on the bridge.
1186 */
1187static u32 pci_acc_tab[2][4] __initdata = {
1188 { MV64x60_PCI02MEM_ACC_CNTL_0_WIN, MV64x60_PCI02MEM_ACC_CNTL_1_WIN,
1189 MV64x60_PCI02MEM_ACC_CNTL_2_WIN, MV64x60_PCI02MEM_ACC_CNTL_3_WIN },
1190 { MV64x60_PCI12MEM_ACC_CNTL_0_WIN, MV64x60_PCI12MEM_ACC_CNTL_1_WIN,
1191 MV64x60_PCI12MEM_ACC_CNTL_2_WIN, MV64x60_PCI12MEM_ACC_CNTL_3_WIN }
1192};
1193
1194static u32 pci_snoop_tab[2][4] __initdata = {
1195 { MV64x60_PCI02MEM_SNOOP_0_WIN, MV64x60_PCI02MEM_SNOOP_1_WIN,
1196 MV64x60_PCI02MEM_SNOOP_2_WIN, MV64x60_PCI02MEM_SNOOP_3_WIN },
1197 { MV64x60_PCI12MEM_SNOOP_0_WIN, MV64x60_PCI12MEM_SNOOP_1_WIN,
1198 MV64x60_PCI12MEM_SNOOP_2_WIN, MV64x60_PCI12MEM_SNOOP_3_WIN }
1199};
1200
1201static u32 pci_size_tab[2][4] __initdata = {
1202 { MV64x60_PCI0_MEM_0_SIZE, MV64x60_PCI0_MEM_1_SIZE,
1203 MV64x60_PCI0_MEM_2_SIZE, MV64x60_PCI0_MEM_3_SIZE },
1204 { MV64x60_PCI1_MEM_0_SIZE, MV64x60_PCI1_MEM_1_SIZE,
1205 MV64x60_PCI1_MEM_2_SIZE, MV64x60_PCI1_MEM_3_SIZE }
1206};
1207
1208void __init
1209mv64x60_config_pci2mem_windows(struct mv64x60_handle *bh,
1210 struct pci_controller *hose, struct mv64x60_pci_info *pi,
1211 u32 bus, u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1212{
1213 u32 i, win;
1214
1215 /*
1216 * Set the access control, snoop, BAR size, and window base addresses.
1217 * PCI->MEM windows base addresses will match exactly what the
1218 * CPU->MEM windows are.
1219 */
1220 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1221 if (bh->ci->is_enabled_32bit(bh, win)) {
1222 mv64x60_set_64bit_window(bh,
1223 pci_acc_tab[bus][i], 0,
1224 mem_windows[i][0], mem_windows[i][1],
1225 pi->acc_cntl_options[i]);
1226 bh->ci->enable_window_64bit(bh, pci_acc_tab[bus][i]);
1227
1228 if (bh->ci->window_tab_64bit[
1229 pci_snoop_tab[bus][i]].base_lo_reg != 0) {
1230
1231 mv64x60_set_64bit_window(bh,
1232 pci_snoop_tab[bus][i], 0,
1233 mem_windows[i][0], mem_windows[i][1],
1234 pi->snoop_options[i]);
1235 bh->ci->enable_window_64bit(bh,
1236 pci_snoop_tab[bus][i]);
1237 }
1238
1239 bh->ci->set_pci2mem_window(hose, bus, i,
1240 mem_windows[i][0]);
1241 mv64x60_write(bh, pci_size_tab[bus][i],
1242 mv64x60_mask(mem_windows[i][1] - 1, 20));
1243
1244 /* Enable the window */
1245 mv64x60_clr_bits(bh, ((bus == 0) ?
1246 MV64x60_PCI0_BAR_ENABLE :
1247 MV64x60_PCI1_BAR_ENABLE), (1 << i));
1248 }
1249}
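/*
 * Example (illustrative): for a 256MB (0x10000000) CPU->MEM window, the
 * PCI BAR size register above is written with
 * mv64x60_mask(0x10000000 - 1, 20) == 0x0ffff000, i.e. "size - 1" kept
 * only in the upper 20 bits (4KB granularity).
 */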
1250
1251/*
1252 *****************************************************************************
1253 *
1254 * Hose & Resource Alloc/Init Routines
1255 *
1256 *****************************************************************************
1257 */
1258/*
1259 * mv64x60_alloc_hose()
1260 *
1261 * Allocate a PCI hose structure for one of the bridge's PCI buses.
1262 */
1263void __init
1264mv64x60_alloc_hose(struct mv64x60_handle *bh, u32 cfg_addr, u32 cfg_data,
1265 struct pci_controller **hose)
1266{
1267 *hose = pcibios_alloc_controller();
1268 setup_indirect_pci_nomap(*hose, bh->v_base + cfg_addr,
1269 bh->v_base + cfg_data);
1270}
1271
1272/*
1273 * mv64x60_config_resources()
1274 *
1275 * Calculate the offsets, etc. for the hose structures to reflect all of
1276 * the address remapping that happens as you go from CPU->PCI and PCI->MEM.
1277 */
1278void __init
1279mv64x60_config_resources(struct pci_controller *hose,
1280 struct mv64x60_pci_info *pi, u32 io_base)
1281{
1282 int i;
1283 /* 2 hoses; 4 resources/hose; string <= 64 bytes */
1284 static char s[2][4][64];
1285
1286 if (pi->pci_io.size != 0) {
1287 sprintf(s[hose->index][0], "PCI hose %d I/O Space",
1288 hose->index);
1289 pci_init_resource(&hose->io_resource, io_base - isa_io_base,
1290 io_base - isa_io_base + pi->pci_io.size - 1,
1291 IORESOURCE_IO, s[hose->index][0]);
1292 hose->io_space.start = pi->pci_io.pci_base_lo;
1293 hose->io_space.end = pi->pci_io.pci_base_lo + pi->pci_io.size-1;
1294 hose->io_base_phys = pi->pci_io.cpu_base;
1295 hose->io_base_virt = (void *)isa_io_base;
1296 }
1297
1298 for (i=0; i<3; i++)
1299 if (pi->pci_mem[i].size != 0) {
1300 sprintf(s[hose->index][i+1], "PCI hose %d MEM Space %d",
1301 hose->index, i);
1302 pci_init_resource(&hose->mem_resources[i],
1303 pi->pci_mem[i].cpu_base,
1304 pi->pci_mem[i].cpu_base + pi->pci_mem[i].size-1,
1305 IORESOURCE_MEM, s[hose->index][i+1]);
1306 }
1307
1308 hose->mem_space.end = pi->pci_mem[0].pci_base_lo +
1309 pi->pci_mem[0].size - 1;
1310 hose->pci_mem_offset = pi->pci_mem[0].cpu_base -
1311 pi->pci_mem[0].pci_base_lo;
1312}
1313
1314/*
1315 * mv64x60_config_pci_params()
1316 *
1317 * Configure a hose's PCI config space parameters.
1318 */
1319void __init
1320mv64x60_config_pci_params(struct pci_controller *hose,
1321 struct mv64x60_pci_info *pi)
1322{
1323 u32 devfn;
1324 u16 u16_val;
1325 u8 save_exclude;
1326
1327 devfn = PCI_DEVFN(0,0);
1328
1329 save_exclude = mv64x60_pci_exclude_bridge;
1330 mv64x60_pci_exclude_bridge = 0;
1331
1332 /* Set class code to indicate host bridge */
1333 u16_val = PCI_CLASS_BRIDGE_HOST; /* 0x0600 (host bridge) */
1334 early_write_config_word(hose, 0, devfn, PCI_CLASS_DEVICE, u16_val);
1335
1336 /* Enable bridge to be PCI master & respond to PCI MEM cycles */
1337 early_read_config_word(hose, 0, devfn, PCI_COMMAND, &u16_val);
1338 u16_val &= ~(PCI_COMMAND_IO | PCI_COMMAND_INVALIDATE |
1339 PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
1340 u16_val |= pi->pci_cmd_bits | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
1341 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
1342
1343 /* Set latency timer, cache line size, clear BIST */
1344 u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2);
1345 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
1346
1347 mv64x60_pci_exclude_bridge = save_exclude;
1348}
1349
1350/*
1351 *****************************************************************************
1352 *
1353 * PCI Related Routines
1354 *
1355 *****************************************************************************
1356 */
1357/*
1358 * mv64x60_set_bus()
1359 *
1360 * Set the bus number for the hose directly under the bridge.
1361 */
1362void __init
1363mv64x60_set_bus(struct mv64x60_handle *bh, u32 bus, u32 child_bus)
1364{
1365 struct pci_controller *hose;
1366 u32 pci_mode, p2p_cfg, pci_cfg_offset, val;
1367 u8 save_exclude;
1368
1369 if (bus == 0) {
1370 pci_mode = bh->pci_mode_a;
1371 p2p_cfg = MV64x60_PCI0_P2P_CONFIG;
1372 pci_cfg_offset = 0x64;
1373 hose = bh->hose_a;
1374 } else {
1375 pci_mode = bh->pci_mode_b;
1376 p2p_cfg = MV64x60_PCI1_P2P_CONFIG;
1377 pci_cfg_offset = 0xe4;
1378 hose = bh->hose_b;
1379 }
1380
1381 child_bus &= 0xff;
1382 val = mv64x60_read(bh, p2p_cfg);
1383
1384 if (pci_mode == MV64x60_PCIMODE_CONVENTIONAL) {
1385 val &= 0xe0000000; /* Force dev num to 0, turn off P2P bridge */
1386 val |= (child_bus << 16) | 0xff;
1387 mv64x60_write(bh, p2p_cfg, val);
1388 (void)mv64x60_read(bh, p2p_cfg); /* Flush FIFO */
1389 } else { /* PCI-X */
1390 /*
1391 * Need to use the current bus/dev number (that's in the
1392 * P2P CONFIG reg) to access the bridge's pci config space.
1393 */
1394 save_exclude = mv64x60_pci_exclude_bridge;
1395 mv64x60_pci_exclude_bridge = 0;
1396 early_write_config_dword(hose, (val & 0x00ff0000) >> 16,
1397 PCI_DEVFN(((val & 0x1f000000) >> 24), 0),
1398 pci_cfg_offset, child_bus << 8);
1399 mv64x60_pci_exclude_bridge = save_exclude;
1400 }
1401}
1402
1403/*
1404 * mv64x60_pci_exclude_device()
1405 *
1406 * This routine is used to make the bridge not appear when the
1407 * PCI subsystem is accessing PCI devices (in PCI config space).
1408 */
1409int
1410mv64x60_pci_exclude_device(u8 bus, u8 devfn)
1411{
1412 struct pci_controller *hose;
1413
1414 hose = pci_bus_to_hose(bus);
1415
1416 /* Skip slot 0 on both hoses */
1417 if ((mv64x60_pci_exclude_bridge == 1) && (PCI_SLOT(devfn) == 0) &&
1418 (hose->first_busno == bus))
1419
1420 return PCIBIOS_DEVICE_NOT_FOUND;
1421 else
1422 return PCIBIOS_SUCCESSFUL;
1423} /* mv64x60_pci_exclude_device() */
1424
1425/*
1426 *****************************************************************************
1427 *
1428 * Platform Device Routines
1429 *
1430 *****************************************************************************
1431 */
1432
1433/*
1434 * mv64x60_pd_fixup()
1435 *
1436 * Need to add the base addr of where the bridge's regs are mapped in the
1437 * physical addr space so drivers can ioremap() them.
1438 */
1439void __init
1440mv64x60_pd_fixup(struct mv64x60_handle *bh, struct platform_device *pd_devs[],
1441 u32 entries)
1442{
1443 struct resource *r;
1444 u32 i, j;
1445
1446 for (i=0; i<entries; i++) {
1447 j = 0;
1448
1449 while ((r = platform_get_resource(pd_devs[i],IORESOURCE_MEM,j))
1450 != NULL) {
1451
1452 r->start += bh->p_base;
1453 r->end += bh->p_base;
1454 j++;
1455 }
1456 }
1457}
1458
1459/*
1460 * mv64x60_add_pds()
1461 *
1462 * Add the mv64x60 platform devices to the list of platform devices.
1463 */
1464static int __init
1465mv64x60_add_pds(void)
1466{
1467 return platform_add_devices(mv64x60_pd_devs,
1468 ARRAY_SIZE(mv64x60_pd_devs));
1469}
1470arch_initcall(mv64x60_add_pds);
1471
1472/*
1473 *****************************************************************************
1474 *
1475 * GT64260-Specific Routines
1476 *
1477 *****************************************************************************
1478 */
1479/*
1480 * gt64260_translate_size()
1481 *
1482 * On the GT64260, the size register is really the "top" address of the window.
1483 */
1484static u32 __init
1485gt64260_translate_size(u32 base, u32 size, u32 num_bits)
1486{
1487 return base + mv64x60_mask(size - 1, num_bits);
1488}
1489
1490/*
1491 * gt64260_untranslate_size()
1492 *
1493 * Translate the top address of a window into a window size.
1494 */
1495static u32 __init
1496gt64260_untranslate_size(u32 base, u32 size, u32 num_bits)
1497{
1498 if (size >= base)
1499 size = size - base + (1 << (32 - num_bits));
1500 else
1501 size = 0;
1502
1503 return size;
1504}
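/*
 * Round-trip example (illustrative, assuming a 12-bit size field): a 256MB
 * window based at 0x80000000 gives
 *
 *	gt64260_translate_size(0x80000000, 0x10000000, 12)   == 0x8ff00000
 *	gt64260_untranslate_size(0x80000000, 0x8ff00000, 12) == 0x10000000
 *
 * so the "top" address register and the window size convert back and forth
 * at the window's 1MB (1 << (32 - 12)) granularity.
 */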
1505
1506/*
1507 * gt64260_set_pci2mem_window()
1508 *
1509 * The PCI->MEM window registers are actually in PCI config space so they
1510 * need to be set by writing the correct config space BARs.
1511 */
1512static u32 gt64260_reg_addrs[2][4] __initdata = {
1513 { 0x10, 0x14, 0x18, 0x1c }, { 0x90, 0x94, 0x98, 0x9c }
1514};
1515
1516static void __init
1517gt64260_set_pci2mem_window(struct pci_controller *hose, u32 bus, u32 window,
1518 u32 base)
1519{
1520 u8 save_exclude;
1521
1522 pr_debug("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
1523 hose->index, base);
1524
1525 save_exclude = mv64x60_pci_exclude_bridge;
1526 mv64x60_pci_exclude_bridge = 0;
1527 early_write_config_dword(hose, 0, PCI_DEVFN(0, 0),
1528 gt64260_reg_addrs[bus][window], mv64x60_mask(base, 20) | 0x8);
1529 mv64x60_pci_exclude_bridge = save_exclude;
1530}
1531
1532/*
1533 * gt64260_set_pci2regs_window()
1534 *
1535 * Set where the bridge's registers appear in PCI MEM space.
1536 */
1537static u32 gt64260_offset[2] __initdata = {0x20, 0xa0};
1538
1539static void __init
1540gt64260_set_pci2regs_window(struct mv64x60_handle *bh,
1541 struct pci_controller *hose, u32 bus, u32 base)
1542{
1543 u8 save_exclude;
1544
1545 pr_debug("set pci->internal regs hose: %d, base: 0x%x\n", hose->index,
1546 base);
1547
1548 save_exclude = mv64x60_pci_exclude_bridge;
1549 mv64x60_pci_exclude_bridge = 0;
1550 early_write_config_dword(hose, 0, PCI_DEVFN(0,0), gt64260_offset[bus],
1551 (base << 16));
1552 mv64x60_pci_exclude_bridge = save_exclude;
1553}
1554
1555/*
1556 * gt64260_is_enabled_32bit()
1557 *
1558 * On a GT64260, a window is enabled iff its top address is >= its base
1559 * address.
1560 */
1561static u32 __init
1562gt64260_is_enabled_32bit(struct mv64x60_handle *bh, u32 window)
1563{
1564 u32 rc = 0;
1565
1566 if ((gt64260_32bit_windows[window].base_reg != 0) &&
1567 (gt64260_32bit_windows[window].size_reg != 0) &&
1568 ((mv64x60_read(bh, gt64260_32bit_windows[window].size_reg) &
1569 ((1 << gt64260_32bit_windows[window].size_bits) - 1)) >=
1570 (mv64x60_read(bh, gt64260_32bit_windows[window].base_reg) &
1571 ((1 << gt64260_32bit_windows[window].base_bits) - 1))))
1572
1573 rc = 1;
1574
1575 return rc;
1576}
1577
1578/*
1579 * gt64260_enable_window_32bit()
1580 *
1581 * On the GT64260, a window is enabled iff the top address is >= the base
1582 * address of the window. Since the window has already been configured by
1583 * the time this routine is called, we have nothing to do here.
1584 */
1585static void __init
1586gt64260_enable_window_32bit(struct mv64x60_handle *bh, u32 window)
1587{
1588 pr_debug("enable 32bit window: %d\n", window);
1589}
1590
1591/*
1592 * gt64260_disable_window_32bit()
1593 *
1594 * On a GT64260, you disable a window by setting its top address to be less
1595 * than its base address.
1596 */
1597static void __init
1598gt64260_disable_window_32bit(struct mv64x60_handle *bh, u32 window)
1599{
1600 pr_debug("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
1601 window, gt64260_32bit_windows[window].base_reg,
1602 gt64260_32bit_windows[window].size_reg);
1603
1604 if ((gt64260_32bit_windows[window].base_reg != 0) &&
1605 (gt64260_32bit_windows[window].size_reg != 0)) {
1606
1607 /* To disable, make bottom reg higher than top reg */
1608 mv64x60_write(bh, gt64260_32bit_windows[window].base_reg,0xfff);
1609 mv64x60_write(bh, gt64260_32bit_windows[window].size_reg, 0);
1610 }
1611}
1612
1613/*
1614 * gt64260_enable_window_64bit()
1615 *
1616 * On the GT64260, a window is enabled iff the top address is >= the base
1617 * address of the window. Since the window has already been configured by
1618 * the time this routine is called, we have nothing to do here.
1619 */
1620static void __init
1621gt64260_enable_window_64bit(struct mv64x60_handle *bh, u32 window)
1622{
1623 pr_debug("enable 64bit window: %d\n", window);
1624}
1625
1626/*
1627 * gt64260_disable_window_64bit()
1628 *
1629 * On a GT64260, you disable a window by setting its top address to be less
1630 * than its base address.
1631 */
1632static void __init
1633gt64260_disable_window_64bit(struct mv64x60_handle *bh, u32 window)
1634{
1635 pr_debug("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
1636 window, gt64260_64bit_windows[window].base_lo_reg,
1637 gt64260_64bit_windows[window].size_reg);
1638
1639 if ((gt64260_64bit_windows[window].base_lo_reg != 0) &&
1640 (gt64260_64bit_windows[window].size_reg != 0)) {
1641
1642 /* To disable, make bottom reg higher than top reg */
1643 mv64x60_write(bh, gt64260_64bit_windows[window].base_lo_reg,
1644 0xfff);
1645 mv64x60_write(bh, gt64260_64bit_windows[window].base_hi_reg, 0);
1646 mv64x60_write(bh, gt64260_64bit_windows[window].size_reg, 0);
1647 }
1648}
1649
1650/*
1651 * gt64260_disable_all_windows()
1652 *
1653 * The GT64260 has several windows that aren't represented in the table of
1654 * windows at the top of this file. This routine turns all of them off
1655 * except for the memory controller windows, of course.
1656 */
1657static void __init
1658gt64260_disable_all_windows(struct mv64x60_handle *bh,
1659 struct mv64x60_setup_info *si)
1660{
1661 u32 i, preserve;
1662
1663 /* Disable 32bit windows (don't disable cpu->mem windows) */
1664 for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
1665 if (i < 32)
1666 preserve = si->window_preserve_mask_32_lo & (1 << i);
1667 else
1668 preserve = si->window_preserve_mask_32_hi & (1<<(i-32));
1669
1670 if (!preserve)
1671 gt64260_disable_window_32bit(bh, i);
1672 }
1673
1674 /* Disable 64bit windows */
1675 for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++)
1676 if (!(si->window_preserve_mask_64 & (1<<i)))
1677 gt64260_disable_window_64bit(bh, i);
1678
1679 /* Turn off cpu protection windows not in gt64260_32bit_windows[] */
1680 mv64x60_write(bh, GT64260_CPU_PROT_BASE_4, 0xfff);
1681 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_4, 0);
1682 mv64x60_write(bh, GT64260_CPU_PROT_BASE_5, 0xfff);
1683 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_5, 0);
1684 mv64x60_write(bh, GT64260_CPU_PROT_BASE_6, 0xfff);
1685 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_6, 0);
1686 mv64x60_write(bh, GT64260_CPU_PROT_BASE_7, 0xfff);
1687 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_7, 0);
1688
1689 /* Turn off PCI->MEM access cntl wins not in gt64260_64bit_windows[] */
1690 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0xfff);
1691 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_HI, 0);
1692 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_SIZE, 0);
1693 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0xfff);
1694 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_HI, 0);
1695 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_SIZE, 0);
1696 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_LO, 0xfff);
1697 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_HI, 0);
1698 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_SIZE, 0);
1699 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_LO, 0xfff);
1700 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_HI, 0);
1701 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_SIZE, 0);
1702
1703 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0xfff);
1704 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_HI, 0);
1705 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_SIZE, 0);
1706 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0xfff);
1707 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_HI, 0);
1708 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_SIZE, 0);
1709 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_LO, 0xfff);
1710 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_HI, 0);
1711 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_SIZE, 0);
1712 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_LO, 0xfff);
1713 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_HI, 0);
1714 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_SIZE, 0);
1715
1716 /* Disable all PCI-><whatever> windows */
1717 mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x07fffdff);
1718 mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x07fffdff);
1719
1720 /*
1721 * Some firmwares enable a bunch of intr sources
1722 * for the PCI INT output pins.
1723 */
1724 mv64x60_write(bh, GT64260_IC_CPU_INTR_MASK_LO, 0);
1725 mv64x60_write(bh, GT64260_IC_CPU_INTR_MASK_HI, 0);
1726 mv64x60_write(bh, GT64260_IC_PCI0_INTR_MASK_LO, 0);
1727 mv64x60_write(bh, GT64260_IC_PCI0_INTR_MASK_HI, 0);
1728 mv64x60_write(bh, GT64260_IC_PCI1_INTR_MASK_LO, 0);
1729 mv64x60_write(bh, GT64260_IC_PCI1_INTR_MASK_HI, 0);
1730 mv64x60_write(bh, GT64260_IC_CPU_INT_0_MASK, 0);
1731 mv64x60_write(bh, GT64260_IC_CPU_INT_1_MASK, 0);
1732 mv64x60_write(bh, GT64260_IC_CPU_INT_2_MASK, 0);
1733 mv64x60_write(bh, GT64260_IC_CPU_INT_3_MASK, 0);
1734}
1735
1736/*
1737 * gt64260a_chip_specific_init()
1738 *
1739 * Implement errata workarounds for the GT64260A.
1740 */
1741static void __init
1742gt64260a_chip_specific_init(struct mv64x60_handle *bh,
1743 struct mv64x60_setup_info *si)
1744{
1745#ifdef CONFIG_SERIAL_MPSC
1746 struct resource *r;
1747#endif
1748#if !defined(CONFIG_NOT_COHERENT_CACHE)
1749 u32 val;
1750 u8 save_exclude;
1751#endif
1752
1753 if (si->pci_0.enable_bus)
1754 mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
1755 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1756
1757 if (si->pci_1.enable_bus)
1758 mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
1759 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1760
1761 /*
1762 * Dave Wilhardt found that bit 4 in the PCI Command registers must
1763 * be set if you are using cache coherency.
1764 */
1765#if !defined(CONFIG_NOT_COHERENT_CACHE)
1766 /* Res #MEM-4 -- cpu read buffer to buffer 1 */
1767 if ((mv64x60_read(bh, MV64x60_CPU_MODE) & 0xf0) == 0x40)
1768 mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
1769
1770 save_exclude = mv64x60_pci_exclude_bridge;
1771 mv64x60_pci_exclude_bridge = 0;
1772 if (si->pci_0.enable_bus) {
1773 early_read_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1774 PCI_COMMAND, &val);
1775 val |= PCI_COMMAND_INVALIDATE;
1776 early_write_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1777 PCI_COMMAND, val);
1778 }
1779
1780 if (si->pci_1.enable_bus) {
1781 early_read_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1782 PCI_COMMAND, &val);
1783 val |= PCI_COMMAND_INVALIDATE;
1784 early_write_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1785 PCI_COMMAND, val);
1786 }
1787 mv64x60_pci_exclude_bridge = save_exclude;
1788#endif
1789
1790 /* Disable buffer/descriptor snooping */
1791 mv64x60_clr_bits(bh, 0xf280, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1792 mv64x60_clr_bits(bh, 0xf2c0, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1793
1794#ifdef CONFIG_SERIAL_MPSC
1795 mv64x60_mpsc0_pdata.mirror_regs = 1;
1796 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1797 mv64x60_mpsc1_pdata.mirror_regs = 1;
1798 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1799
1800 if ((r = platform_get_resource(&mpsc1_device, IORESOURCE_IRQ, 0))
1801 != NULL) {
1802 r->start = MV64x60_IRQ_SDMA_0;
1803 r->end = MV64x60_IRQ_SDMA_0;
1804 }
1805#endif
1806}
1807
1808/*
1809 * gt64260b_chip_specific_init()
1810 *
1811 * Implement errata workarounds for the GT64260B.
1812 */
1813static void __init
1814gt64260b_chip_specific_init(struct mv64x60_handle *bh,
1815 struct mv64x60_setup_info *si)
1816{
1817#ifdef CONFIG_SERIAL_MPSC
1818 struct resource *r;
1819#endif
1820#if !defined(CONFIG_NOT_COHERENT_CACHE)
1821 u32 val;
1822 u8 save_exclude;
1823#endif
1824
1825 if (si->pci_0.enable_bus)
1826 mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
1827 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1828
1829 if (si->pci_1.enable_bus)
1830 mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
1831 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1832
1833 /*
1834	 * Dave Wilhardt found that bit 4 of the PCI Command registers (PCI_COMMAND_INVALIDATE,
1835	 * i.e. Memory Write and Invalidate enable) must be set when cache coherency is in use.
1836 */
1837#if !defined(CONFIG_NOT_COHERENT_CACHE)
1838 mv64x60_set_bits(bh, GT64260_CPU_WB_PRIORITY_BUFFER_DEPTH, 0xf);
1839
1840 /* Res #MEM-4 -- cpu read buffer to buffer 1 */
1841 if ((mv64x60_read(bh, MV64x60_CPU_MODE) & 0xf0) == 0x40)
1842 mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
1843
1844 save_exclude = mv64x60_pci_exclude_bridge;
1845 mv64x60_pci_exclude_bridge = 0;
1846 if (si->pci_0.enable_bus) {
1847 early_read_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1848 PCI_COMMAND, &val);
1849 val |= PCI_COMMAND_INVALIDATE;
1850 early_write_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1851 PCI_COMMAND, val);
1852 }
1853
1854 if (si->pci_1.enable_bus) {
1855 early_read_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1856 PCI_COMMAND, &val);
1857 val |= PCI_COMMAND_INVALIDATE;
1858 early_write_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1859 PCI_COMMAND, val);
1860 }
1861 mv64x60_pci_exclude_bridge = save_exclude;
1862#endif
1863
1864 /* Disable buffer/descriptor snooping */
1865 mv64x60_clr_bits(bh, 0xf280, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1866 mv64x60_clr_bits(bh, 0xf2c0, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1867
1868#ifdef CONFIG_SERIAL_MPSC
1869 /*
1870 * The 64260B is not supposed to have the bug where the MPSC & ENET
1871 * can't access cache coherent regions. However, testing has shown
1872 * that the MPSC, at least, still has this bug.
1873 */
1874 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1875 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1876
1877 if ((r = platform_get_resource(&mpsc1_device, IORESOURCE_IRQ, 0))
1878 != NULL) {
1879 r->start = MV64x60_IRQ_SDMA_0;
1880 r->end = MV64x60_IRQ_SDMA_0;
1881 }
1882#endif
1883}
1884
1885/*
1886 *****************************************************************************
1887 *
1888 * MV64360-Specific Routines
1889 *
1890 *****************************************************************************
1891 */
1892/*
1893 * mv64360_translate_size()
1894 *
1895 * On the MV64360, the size register is encoded much like the size field of a
1896 * PCI config space BAR: from LSB to MSB, a run of 1's followed by a run of 0's.
1897 * In other words, "size - 1", with the assumption that the size is a power of 2.
1898 * (A worked example follows mv64360_untranslate_size() below.)
1899 */
1900static u32 __init
1901mv64360_translate_size(u32 base_addr, u32 size, u32 num_bits)
1902{
1903 return mv64x60_mask(size - 1, num_bits);
1904}
1905
1906/*
1907 * mv64360_untranslate_size()
1908 *
1909 * Translate the size register value of a window into a window size.
1910 */
1911static u32 __init
1912mv64360_untranslate_size(u32 base_addr, u32 size, u32 num_bits)
1913{
1914 if (size > 0) {
1915 size >>= (32 - num_bits);
1916 size++;
1917 size <<= (32 - num_bits);
1918 }
1919
1920 return size;
1921}
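/*
 * A minimal worked example of the encoding above (not part of the driver),
 * assuming mv64x60_mask(v, n) keeps only the upper n bits of v: a 256MB
 * (0x10000000) window with num_bits == 16 round-trips cleanly.
 */
#if 0	/* illustrative self-check only */
static void __init mv64360_size_encoding_example(void)
{
	u32 reg  = mv64360_translate_size(0, 0x10000000, 16);	/* 0x0fff0000 */
	u32 size = mv64360_untranslate_size(0, reg, 16);	/* 0x10000000 */

	BUG_ON(reg != 0x0fff0000 || size != 0x10000000);
}
#endif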
1922
1923/*
1924 * mv64360_set_pci2mem_window()
1925 *
1926 * The PCI->MEM window registers actually live in PCI config space, so they
1927 * are set by programming the corresponding config space BARs.
1928 */
1929struct {
1930 u32 fcn;
1931 u32 base_hi_bar;
1932 u32 base_lo_bar;
1933} static mv64360_reg_addrs[2][4] __initdata = {
1934 {{ 0, 0x14, 0x10 }, { 0, 0x1c, 0x18 },
1935 { 1, 0x14, 0x10 }, { 1, 0x1c, 0x18 }},
1936 {{ 0, 0x94, 0x90 }, { 0, 0x9c, 0x98 },
1937 { 1, 0x94, 0x90 }, { 1, 0x9c, 0x98 }}
1938};
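/*
 * For reference: each mv64360_reg_addrs[bus][window] entry names the PCI
 * function and the config space offsets of the high/low BAR pair backing
 * that window, e.g. bus 0, window 0 is function 0 with its low BAR at
 * config offset 0x10 and its high BAR at 0x14.
 */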
1939
1940static void __init
1941mv64360_set_pci2mem_window(struct pci_controller *hose, u32 bus, u32 window,
1942 u32 base)
1943{
1944 u8 save_exclude;
1945
1946 pr_debug("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
1947 hose->index, base);
1948
1949 save_exclude = mv64x60_pci_exclude_bridge;
1950 mv64x60_pci_exclude_bridge = 0;
1951 early_write_config_dword(hose, 0,
1952 PCI_DEVFN(0, mv64360_reg_addrs[bus][window].fcn),
1953 mv64360_reg_addrs[bus][window].base_hi_bar, 0);
1954 early_write_config_dword(hose, 0,
1955 PCI_DEVFN(0, mv64360_reg_addrs[bus][window].fcn),
1956 mv64360_reg_addrs[bus][window].base_lo_bar,
1957 mv64x60_mask(base,20) | 0xc);
1958 mv64x60_pci_exclude_bridge = save_exclude;
1959}
1960
1961/*
1962 * mv64360_set_pci2regs_window()
1963 *
1964 * Set where the bridge's registers appear in PCI MEM space.
1965 */
1966static u32 mv64360_offset[2][2] __initdata = {{0x20, 0x24}, {0xa0, 0xa4}};
1967
1968static void __init
1969mv64360_set_pci2regs_window(struct mv64x60_handle *bh,
1970 struct pci_controller *hose, u32 bus, u32 base)
1971{
1972 u8 save_exclude;
1973
1974 pr_debug("set pci->internal regs hose: %d, base: 0x%x\n", hose->index,
1975 base);
1976
1977 save_exclude = mv64x60_pci_exclude_bridge;
1978 mv64x60_pci_exclude_bridge = 0;
1979 early_write_config_dword(hose, 0, PCI_DEVFN(0,0),
1980 mv64360_offset[bus][0], (base << 16));
1981 early_write_config_dword(hose, 0, PCI_DEVFN(0,0),
1982 mv64360_offset[bus][1], 0);
1983 mv64x60_pci_exclude_bridge = save_exclude;
1984}
1985
1986/*
1987 * mv64360_is_enabled_32bit()
1988 *
1989 * On a MV64360, a window is enabled by either clearing a bit in the
1990 * CPU BAR Enable reg or setting a bit in the window's base reg.
1991 * Note that this doesn't work for windows on the PCI slave side but we don't
1992 * check those, so it's okay.
1993 */
1994static u32 __init
1995mv64360_is_enabled_32bit(struct mv64x60_handle *bh, u32 window)
1996{
1997 u32 extra, rc = 0;
1998
1999 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2000 (mv64360_32bit_windows[window].size_reg != 0)) ||
2001 (window == MV64x60_CPU2SRAM_WIN)) {
2002
2003 extra = mv64360_32bit_windows[window].extra;
2004
2005 switch (extra & MV64x60_EXTRA_MASK) {
2006 case MV64x60_EXTRA_CPUWIN_ENAB:
2007 rc = (mv64x60_read(bh, MV64360_CPU_BAR_ENABLE) &
2008 (1 << (extra & 0x1f))) == 0;
2009 break;
2010
2011 case MV64x60_EXTRA_CPUPROT_ENAB:
2012 rc = (mv64x60_read(bh,
2013 mv64360_32bit_windows[window].base_reg) &
2014 (1 << (extra & 0x1f))) != 0;
2015 break;
2016
2017 case MV64x60_EXTRA_ENET_ENAB:
2018 rc = (mv64x60_read(bh, MV64360_ENET2MEM_BAR_ENABLE) &
2019 (1 << (extra & 0x7))) == 0;
2020 break;
2021
2022 case MV64x60_EXTRA_MPSC_ENAB:
2023 rc = (mv64x60_read(bh, MV64360_MPSC2MEM_BAR_ENABLE) &
2024 (1 << (extra & 0x3))) == 0;
2025 break;
2026
2027 case MV64x60_EXTRA_IDMA_ENAB:
2028 rc = (mv64x60_read(bh, MV64360_IDMA2MEM_BAR_ENABLE) &
2029 (1 << (extra & 0x7))) == 0;
2030 break;
2031
2032 default:
2033 printk(KERN_ERR "mv64360_is_enabled: %s\n",
2034 "32bit table corrupted");
2035 }
2036 }
2037
2038 return rc;
2039}
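/*
 * As the switch above decodes, a window's 'extra' field packs two things:
 * the enable mechanism in the MV64x60_EXTRA_MASK bits, and the bit position
 * within the corresponding enable register in the low bits.  A minimal
 * sketch of the decode (variable names are illustrative only):
 *
 *	u32 extra = mv64360_32bit_windows[window].extra;
 *	u32 mech  = extra & MV64x60_EXTRA_MASK;	-- which enable register
 *	u32 bit   = extra & 0x1f;		-- bit number (0x7/0x3 masks for enet/mpsc)
 */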
2040
2041/*
2042 * mv64360_enable_window_32bit()
2043 *
2044 * On a MV64360, a window is enabled by either clearing a bit in the
2045 * CPU BAR Enable reg or setting a bit in the window's base reg.
2046 */
2047static void __init
2048mv64360_enable_window_32bit(struct mv64x60_handle *bh, u32 window)
2049{
2050 u32 extra;
2051
2052 pr_debug("enable 32bit window: %d\n", window);
2053
2054 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2055 (mv64360_32bit_windows[window].size_reg != 0)) ||
2056 (window == MV64x60_CPU2SRAM_WIN)) {
2057
2058 extra = mv64360_32bit_windows[window].extra;
2059
2060 switch (extra & MV64x60_EXTRA_MASK) {
2061 case MV64x60_EXTRA_CPUWIN_ENAB:
2062 mv64x60_clr_bits(bh, MV64360_CPU_BAR_ENABLE,
2063 (1 << (extra & 0x1f)));
2064 break;
2065
2066 case MV64x60_EXTRA_CPUPROT_ENAB:
2067 mv64x60_set_bits(bh,
2068 mv64360_32bit_windows[window].base_reg,
2069 (1 << (extra & 0x1f)));
2070 break;
2071
2072 case MV64x60_EXTRA_ENET_ENAB:
2073 mv64x60_clr_bits(bh, MV64360_ENET2MEM_BAR_ENABLE,
2074 (1 << (extra & 0x7)));
2075 break;
2076
2077 case MV64x60_EXTRA_MPSC_ENAB:
2078 mv64x60_clr_bits(bh, MV64360_MPSC2MEM_BAR_ENABLE,
2079 (1 << (extra & 0x3)));
2080 break;
2081
2082 case MV64x60_EXTRA_IDMA_ENAB:
2083 mv64x60_clr_bits(bh, MV64360_IDMA2MEM_BAR_ENABLE,
2084 (1 << (extra & 0x7)));
2085 break;
2086
2087 default:
2088 printk(KERN_ERR "mv64360_enable: %s\n",
2089 "32bit table corrupted");
2090 }
2091 }
2092}
2093
2094/*
2095 * mv64360_disable_window_32bit()
2096 *
2097 * On a MV64360, a window is disabled by either setting a bit in the
2098 * CPU BAR Enable reg or clearing a bit in the window's base reg.
2099 */
2100static void __init
2101mv64360_disable_window_32bit(struct mv64x60_handle *bh, u32 window)
2102{
2103 u32 extra;
2104
2105 pr_debug("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
2106 window, mv64360_32bit_windows[window].base_reg,
2107 mv64360_32bit_windows[window].size_reg);
2108
2109 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2110 (mv64360_32bit_windows[window].size_reg != 0)) ||
2111 (window == MV64x60_CPU2SRAM_WIN)) {
2112
2113 extra = mv64360_32bit_windows[window].extra;
2114
2115 switch (extra & MV64x60_EXTRA_MASK) {
2116 case MV64x60_EXTRA_CPUWIN_ENAB:
2117 mv64x60_set_bits(bh, MV64360_CPU_BAR_ENABLE,
2118 (1 << (extra & 0x1f)));
2119 break;
2120
2121 case MV64x60_EXTRA_CPUPROT_ENAB:
2122 mv64x60_clr_bits(bh,
2123 mv64360_32bit_windows[window].base_reg,
2124 (1 << (extra & 0x1f)));
2125 break;
2126
2127 case MV64x60_EXTRA_ENET_ENAB:
2128 mv64x60_set_bits(bh, MV64360_ENET2MEM_BAR_ENABLE,
2129 (1 << (extra & 0x7)));
2130 break;
2131
2132 case MV64x60_EXTRA_MPSC_ENAB:
2133 mv64x60_set_bits(bh, MV64360_MPSC2MEM_BAR_ENABLE,
2134 (1 << (extra & 0x3)));
2135 break;
2136
2137 case MV64x60_EXTRA_IDMA_ENAB:
2138 mv64x60_set_bits(bh, MV64360_IDMA2MEM_BAR_ENABLE,
2139 (1 << (extra & 0x7)));
2140 break;
2141
2142 default:
2143 printk(KERN_ERR "mv64360_disable: %s\n",
2144 "32bit table corrupted");
2145 }
2146 }
2147}
2148
2149/*
2150 * mv64360_enable_window_64bit()
2151 *
2152 * On the MV64360, a 64-bit window is enabled by setting a bit in the window's
2153 * base reg.
2154 */
2155static void __init
2156mv64360_enable_window_64bit(struct mv64x60_handle *bh, u32 window)
2157{
2158 pr_debug("enable 64bit window: %d\n", window);
2159
2160 if ((mv64360_64bit_windows[window].base_lo_reg!= 0) &&
2161 (mv64360_64bit_windows[window].size_reg != 0)) {
2162
2163 if ((mv64360_64bit_windows[window].extra & MV64x60_EXTRA_MASK)
2164 == MV64x60_EXTRA_PCIACC_ENAB)
2165 mv64x60_set_bits(bh,
2166 mv64360_64bit_windows[window].base_lo_reg,
2167 (1 << (mv64360_64bit_windows[window].extra &
2168 0x1f)));
2169 else
2170 printk(KERN_ERR "mv64360_enable: %s\n",
2171 "64bit table corrupted");
2172 }
2173}
2174
2175/*
2176 * mv64360_disable_window_64bit()
2177 *
2178 * On a MV64360, a 64-bit window is disabled by clearing a bit in the window's
2179 * base reg.
2180 */
2181static void __init
2182mv64360_disable_window_64bit(struct mv64x60_handle *bh, u32 window)
2183{
2184 pr_debug("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
2185 window, mv64360_64bit_windows[window].base_lo_reg,
2186 mv64360_64bit_windows[window].size_reg);
2187
2188 if ((mv64360_64bit_windows[window].base_lo_reg != 0) &&
2189 (mv64360_64bit_windows[window].size_reg != 0)) {
2190 if ((mv64360_64bit_windows[window].extra & MV64x60_EXTRA_MASK)
2191 == MV64x60_EXTRA_PCIACC_ENAB)
2192 mv64x60_clr_bits(bh,
2193 mv64360_64bit_windows[window].base_lo_reg,
2194 (1 << (mv64360_64bit_windows[window].extra &
2195 0x1f)));
2196 else
2197 printk(KERN_ERR "mv64360_disable: %s\n",
2198 "64bit table corrupted");
2199 }
2200}
2201
2202/*
2203 * mv64360_disable_all_windows()
2204 *
2205 * The MV64360 has a few windows that aren't represented in the table of
2206 * windows at the top of this file. This routine turns all of them off
2207 * except for the memory controller windows, of course.
2208 */
2209static void __init
2210mv64360_disable_all_windows(struct mv64x60_handle *bh,
2211 struct mv64x60_setup_info *si)
2212{
2213 u32 preserve, i;
2214
2215 /* Disable 32bit windows (don't disable cpu->mem windows) */
2216 for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
2217 if (i < 32)
2218 preserve = si->window_preserve_mask_32_lo & (1 << i);
2219 else
2220 preserve = si->window_preserve_mask_32_hi & (1<<(i-32));
2221
2222 if (!preserve)
2223 mv64360_disable_window_32bit(bh, i);
2224 }
2225
2226 /* Disable 64bit windows */
2227 for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++)
2228 if (!(si->window_preserve_mask_64 & (1<<i)))
2229 mv64360_disable_window_64bit(bh, i);
2230
2231 /* Turn off PCI->MEM access cntl wins not in mv64360_64bit_windows[] */
2232 mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0);
2233 mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0);
2234 mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0);
2235 mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0);
2236
2237 /* Disable all PCI-><whatever> windows */
2238 mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x0000f9ff);
2239 mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x0000f9ff);
2240}
2241
2242/*
2243 * mv64360_config_io2mem_windows()
2244 *
2245 * ENET, MPSC, and IDMA ctlrs on the MV64[34]60 have separate windows that
2246 * must be set up so that the respective ctlr can access system memory.
2247 */
2248static u32 enet_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2249 MV64x60_ENET2MEM_0_WIN, MV64x60_ENET2MEM_1_WIN,
2250 MV64x60_ENET2MEM_2_WIN, MV64x60_ENET2MEM_3_WIN,
2251};
2252
2253static u32 mpsc_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2254 MV64x60_MPSC2MEM_0_WIN, MV64x60_MPSC2MEM_1_WIN,
2255 MV64x60_MPSC2MEM_2_WIN, MV64x60_MPSC2MEM_3_WIN,
2256};
2257
2258static u32 idma_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2259 MV64x60_IDMA2MEM_0_WIN, MV64x60_IDMA2MEM_1_WIN,
2260 MV64x60_IDMA2MEM_2_WIN, MV64x60_IDMA2MEM_3_WIN,
2261};
2262
2263static u32 dram_selects[MV64x60_CPU2MEM_WINDOWS] __initdata =
2264 { 0xe, 0xd, 0xb, 0x7 };
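/*
 * For reference: dram_selects[i] is the 4-bit pattern with bit i cleared
 * (0xe, 0xd, 0xb, 0x7); it is shifted into bits 8-11 of each window's
 * attributes below, presumably as an active-low select for DRAM bank i.
 */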
2265
2266static void __init
2267mv64360_config_io2mem_windows(struct mv64x60_handle *bh,
2268 struct mv64x60_setup_info *si,
2269 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
2270{
2271 u32 i, win;
2272
2273	pr_debug("config_io2mem_windows: enet, mpsc, idma -> system memory\n");
2274
2275 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_0, 0);
2276 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_1, 0);
2277 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_2, 0);
2278
2279 mv64x60_write(bh, MV64360_MPSC2MEM_ACC_PROT_0, 0);
2280 mv64x60_write(bh, MV64360_MPSC2MEM_ACC_PROT_1, 0);
2281
2282 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_0, 0);
2283 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_1, 0);
2284 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_2, 0);
2285 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_3, 0);
2286
2287 /* Assume that mem ctlr has no more windows than embedded I/O ctlr */
2288 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
2289 if (bh->ci->is_enabled_32bit(bh, win)) {
2290 mv64x60_set_32bit_window(bh, enet_tab[i],
2291 mem_windows[i][0], mem_windows[i][1],
2292 (dram_selects[i] << 8) |
2293 (si->enet_options[i] & 0x3000));
2294 bh->ci->enable_window_32bit(bh, enet_tab[i]);
2295
2296 /* Give enet r/w access to memory region */
2297 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_0,
2298 (0x3 << (i << 1)));
2299 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_1,
2300 (0x3 << (i << 1)));
2301 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_2,
2302 (0x3 << (i << 1)));
2303
2304 mv64x60_set_32bit_window(bh, mpsc_tab[i],
2305 mem_windows[i][0], mem_windows[i][1],
2306 (dram_selects[i] << 8) |
2307 (si->mpsc_options[i] & 0x3000));
2308 bh->ci->enable_window_32bit(bh, mpsc_tab[i]);
2309
2310 /* Give mpsc r/w access to memory region */
2311 mv64x60_set_bits(bh, MV64360_MPSC2MEM_ACC_PROT_0,
2312 (0x3 << (i << 1)));
2313 mv64x60_set_bits(bh, MV64360_MPSC2MEM_ACC_PROT_1,
2314 (0x3 << (i << 1)));
2315
2316 mv64x60_set_32bit_window(bh, idma_tab[i],
2317 mem_windows[i][0], mem_windows[i][1],
2318 (dram_selects[i] << 8) |
2319 (si->idma_options[i] & 0x3000));
2320 bh->ci->enable_window_32bit(bh, idma_tab[i]);
2321
2322 /* Give idma r/w access to memory region */
2323 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_0,
2324 (0x3 << (i << 1)));
2325 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_1,
2326 (0x3 << (i << 1)));
2327 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_2,
2328 (0x3 << (i << 1)));
2329 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_3,
2330 (0x3 << (i << 1)));
2331 }
2332}
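/*
 * In short: for every enabled CPU->MEM window, the same base/size pair is
 * copied into the matching ENET, MPSC and IDMA windows, and the window's
 * 2-bit field in each _ACC_PROT register is set to 0x3, giving that
 * controller read/write access to the region.
 */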
2333
2334/*
2335 * mv64360_set_mpsc2regs_window()
2336 *
2337 * The MPSC has a window to the bridge's internal registers.  Call this routine
2338 * to change that window so it doesn't conflict with the windows mapping the
2339 * mpsc to system memory.
2340 */
2341static void __init
2342mv64360_set_mpsc2regs_window(struct mv64x60_handle *bh, u32 base)
2343{
2344 pr_debug("set mpsc->internal regs, base: 0x%x\n", base);
2345 mv64x60_write(bh, MV64360_MPSC2REGS_BASE, base & 0xffff0000);
2346}
2347
2348/*
2349 * mv64360_chip_specific_init()
2350 *
2351 * Implement errata workarounds for the MV64360.
2352 */
2353static void __init
2354mv64360_chip_specific_init(struct mv64x60_handle *bh,
2355 struct mv64x60_setup_info *si)
2356{
2357#if !defined(CONFIG_NOT_COHERENT_CACHE)
2358 mv64x60_set_bits(bh, MV64360_D_UNIT_CONTROL_HIGH, (1<<24));
2359#endif
2360#ifdef CONFIG_SERIAL_MPSC
2361 mv64x60_mpsc0_pdata.brg_can_tune = 1;
2362 mv64x60_mpsc0_pdata.cache_mgmt = 1;
2363 mv64x60_mpsc1_pdata.brg_can_tune = 1;
2364 mv64x60_mpsc1_pdata.cache_mgmt = 1;
2365#endif
2366}
2367
2368/*
2369 * mv64460_chip_specific_init()
2370 *
2371 * Implement errata workarounds for the MV64460.
2372 */
2373static void __init
2374mv64460_chip_specific_init(struct mv64x60_handle *bh,
2375 struct mv64x60_setup_info *si)
2376{
2377#if !defined(CONFIG_NOT_COHERENT_CACHE)
2378 mv64x60_set_bits(bh, MV64360_D_UNIT_CONTROL_HIGH, (1<<24) | (1<<25));
2379 mv64x60_set_bits(bh, MV64460_D_UNIT_MMASK, (1<<1) | (1<<4));
2380#endif
2381#ifdef CONFIG_SERIAL_MPSC
2382 mv64x60_mpsc0_pdata.brg_can_tune = 1;
2383 mv64x60_mpsc0_pdata.cache_mgmt = 1;
2384 mv64x60_mpsc1_pdata.brg_can_tune = 1;
2385 mv64x60_mpsc1_pdata.cache_mgmt = 1;
2386#endif
2387}
2388
2389
2390#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
2391/* Export the cPCI hotswap register via sysfs for ENUM# event monitoring */
2392#define VAL_LEN_MAX 11 /* 32-bit hex or dec stringified number + '\n' */
2393
2394static DEFINE_MUTEX(mv64xxx_hs_lock);
2395
2396static ssize_t
2397mv64xxx_hs_reg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
2398{
2399 u32 v;
2400 u8 save_exclude;
2401
2402 if (off > 0)
2403 return 0;
2404 if (count < VAL_LEN_MAX)
2405 return -EINVAL;
2406
2407 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2408 return -ERESTARTSYS;
2409 save_exclude = mv64x60_pci_exclude_bridge;
2410 mv64x60_pci_exclude_bridge = 0;
2411 early_read_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2412 MV64360_PCICFG_CPCI_HOTSWAP, &v);
2413 mv64x60_pci_exclude_bridge = save_exclude;
2414 mutex_unlock(&mv64xxx_hs_lock);
2415
2416 return sprintf(buf, "0x%08x\n", v);
2417}
2418
2419static ssize_t
2420mv64xxx_hs_reg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
2421{
2422 u32 v;
2423 u8 save_exclude;
2424
2425 if (off > 0)
2426 return 0;
2427 if (count <= 0)
2428 return -EINVAL;
2429
2430 if (sscanf(buf, "%i", &v) == 1) {
2431 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2432 return -ERESTARTSYS;
2433 save_exclude = mv64x60_pci_exclude_bridge;
2434 mv64x60_pci_exclude_bridge = 0;
2435 early_write_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2436 MV64360_PCICFG_CPCI_HOTSWAP, v);
2437 mv64x60_pci_exclude_bridge = save_exclude;
2438 mutex_unlock(&mv64xxx_hs_lock);
2439 }
2440 else
2441 count = -EINVAL;
2442
2443 return count;
2444}
2445
2446static struct bin_attribute mv64xxx_hs_reg_attr = { /* Hotswap register */
2447 .attr = {
2448 .name = "hs_reg",
2449 .mode = S_IRUGO | S_IWUSR,
2450 },
2451 .size = VAL_LEN_MAX,
2452 .read = mv64xxx_hs_reg_read,
2453 .write = mv64xxx_hs_reg_write,
2454};
2455
2456/* Provide sysfs file indicating if this platform supports the hs_reg */
2457static ssize_t
2458mv64xxx_hs_reg_valid_show(struct device *dev, struct device_attribute *attr,
2459 char *buf)
2460{
2461 struct platform_device *pdev;
2462 struct mv64xxx_pdata *pdp;
2463 u32 v;
2464
2465 pdev = container_of(dev, struct platform_device, dev);
2466 pdp = (struct mv64xxx_pdata *)pdev->dev.platform_data;
2467
2468 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2469 return -ERESTARTSYS;
2470 v = pdp->hs_reg_valid;
2471 mutex_unlock(&mv64xxx_hs_lock);
2472
2473 return sprintf(buf, "%i\n", v);
2474}
2475static DEVICE_ATTR(hs_reg_valid, S_IRUGO, mv64xxx_hs_reg_valid_show, NULL);
2476
2477static int __init
2478mv64xxx_sysfs_init(void)
2479{
2480 sysfs_create_bin_file(&mv64xxx_device.dev.kobj, &mv64xxx_hs_reg_attr);
2481 sysfs_create_file(&mv64xxx_device.dev.kobj,&dev_attr_hs_reg_valid.attr);
2482 return 0;
2483}
2484subsys_initcall(mv64xxx_sysfs_init);
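/*
 * Typical usage from userspace (the exact sysfs path depends on how
 * mv64xxx_device is registered; the directory name below is only a
 * placeholder):
 *
 *	cat /sys/devices/platform/<mv64xxx-dev>/hs_reg_valid
 *	cat /sys/devices/platform/<mv64xxx-dev>/hs_reg
 *	echo 0x00000000 > /sys/devices/platform/<mv64xxx-dev>/hs_reg
 *
 * Reads return the cPCI hotswap register formatted as "0x%08x\n"; writes
 * accept any value that sscanf("%i") can parse.
 */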
2485#endif